From 10f21fa313d8ee8cd636b1e945b1a02893d303f8 Mon Sep 17 00:00:00 2001 From: Richard Liaw Date: Tue, 24 Sep 2019 15:46:56 -0700 Subject: [PATCH] [docs] Convert Examples to Gallery (#5414) --- .travis.yml | 2 +- ci/jenkins_tests/run_multi_node_tests.sh | 11 + doc/.gitignore | 1 + doc/Makefile | 3 +- doc/examples/README.rst | 0 doc/examples/cython/cython_main.py | 19 +- doc/examples/cython/setup.py | 15 +- doc/examples/hyperopt/hyperopt_adaptive.py | 154 --------- doc/examples/hyperopt/hyperopt_simple.py | 100 ------ doc/examples/hyperopt/objective.py | 127 -------- doc/examples/overview.rst | 34 ++ .../plot_example-a3c.rst} | 1 - doc/examples/plot_hyperparameter.py | 178 +++++++++++ .../plot_lbfgs.rst} | 0 .../plot_newsreader.rst} | 0 doc/examples/plot_parameter_server.py | 289 +++++++++++++++++ doc/examples/plot_pong_example.py | 293 ++++++++++++++++++ .../plot_resnet.rst} | 0 .../plot_streaming.rst} | 0 doc/examples/resnet/cifar_input.py | 12 +- doc/examples/rl_pong/driver.py | 213 ------------- doc/source/_static/img/thumbnails/default.png | Bin 0 -> 26786 bytes doc/source/conf.py | 44 ++- doc/source/custom_directives.py | 94 ++++++ doc/source/example-parameter-server.rst | 127 -------- doc/source/example-rl-pong.rst | 118 ------- doc/source/images/param_actor.png | Bin 0 -> 19644 bytes doc/source/index.rst | 16 +- 28 files changed, 966 insertions(+), 885 deletions(-) create mode 100644 doc/.gitignore create mode 100644 doc/examples/README.rst delete mode 100644 doc/examples/hyperopt/hyperopt_adaptive.py delete mode 100644 doc/examples/hyperopt/hyperopt_simple.py delete mode 100644 doc/examples/hyperopt/objective.py create mode 100644 doc/examples/overview.rst rename doc/{source/example-a3c.rst => examples/plot_example-a3c.rst} (99%) create mode 100644 doc/examples/plot_hyperparameter.py rename doc/{source/example-lbfgs.rst => examples/plot_lbfgs.rst} (100%) rename doc/{source/example-newsreader.rst => examples/plot_newsreader.rst} (100%) create mode 100644 doc/examples/plot_parameter_server.py create mode 100644 doc/examples/plot_pong_example.py rename doc/{source/example-resnet.rst => examples/plot_resnet.rst} (100%) rename doc/{source/example-streaming.rst => examples/plot_streaming.rst} (100%) delete mode 100644 doc/examples/rl_pong/driver.py create mode 100644 doc/source/_static/img/thumbnails/default.png create mode 100644 doc/source/custom_directives.py delete mode 100644 doc/source/example-parameter-server.rst delete mode 100644 doc/source/example-rl-pong.rst create mode 100644 doc/source/images/param_actor.png diff --git a/.travis.yml b/.travis.yml index ab1562bd0..2e696d54e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,7 +47,7 @@ matrix: - export PATH="$HOME/miniconda/bin:$PATH" - cd doc - pip install -q -r requirements-doc.txt - - pip install -q yapf==0.23.0 + - pip install -q yapf==0.23.0 sphinx-gallery - sphinx-build -W -b html -d _build/doctrees source _build/html - cd .. 
# Run Python linting, ignore dict vs {} (C408), others are defaults diff --git a/ci/jenkins_tests/run_multi_node_tests.sh b/ci/jenkins_tests/run_multi_node_tests.sh index c0d68af4a..a1f622f32 100755 --- a/ci/jenkins_tests/run_multi_node_tests.sh +++ b/ci/jenkins_tests/run_multi_node_tests.sh @@ -15,6 +15,17 @@ DOCKER_SHA=$($ROOT_DIR/../../build-docker.sh --output-sha --no-cache) SUPPRESS_OUTPUT=$ROOT_DIR/../suppress_output echo "Using Docker image" $DOCKER_SHA +######################## EXAMPLE TESTS ################################# + +$SUPPRESS_OUTPUT docker run --rm --shm-size=${SHM_SIZE} --memory=${MEMORY_SIZE} $DOCKER_SHA \ + python /ray/doc/examples/plot_pong_example.py + +$SUPPRESS_OUTPUT docker run --rm --shm-size=${SHM_SIZE} --memory=${MEMORY_SIZE} $DOCKER_SHA \ + python /ray/doc/examples/plot_parameter_server.py + +$SUPPRESS_OUTPUT docker run --rm --shm-size=${SHM_SIZE} --memory=${MEMORY_SIZE} $DOCKER_SHA \ + python /ray/doc/examples/plot_hyperparameter.py + ######################## RLLIB TESTS ################################# source $ROOT_DIR/run_rllib_tests.sh diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 000000000..6f5e52c0e --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1 @@ +auto_examples/ diff --git a/doc/Makefile b/doc/Makefile index e188f9a06..6ee544e11 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -6,6 +6,7 @@ SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build +AUTOGALLERYDIR= source/auto_examples # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) @@ -49,7 +50,7 @@ help: @echo " coverage to run coverage check of the documentation (if enabled)" clean: - rm -rf $(BUILDDIR)/* + rm -rf $(BUILDDIR)/* $(AUTOGALLERYDIR) html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html diff --git a/doc/examples/README.rst b/doc/examples/README.rst new file mode 100644 index 000000000..e69de29bb diff --git a/doc/examples/cython/cython_main.py b/doc/examples/cython/cython_main.py index 612ee0249..296b3f67c 100644 --- a/doc/examples/cython/cython_main.py +++ b/doc/examples/cython/cython_main.py @@ -97,23 +97,12 @@ def example8(): """Cython with blas. 
NOTE: requires scipy""" # See cython_blas.pyx for argument documentation - mat = np.array([[[2.0, 2.0], [2.0, 2.0]], [[2.0, 2.0], [2.0, 2.0]]], - dtype=np.float32) + mat = np.array( + [[[2.0, 2.0], [2.0, 2.0]], [[2.0, 2.0], [2.0, 2.0]]], dtype=np.float32) result = np.zeros((2, 2), np.float32, order="C") - run_func(cyth.compute_kernel_matrix, - "L", - "T", - 2, - 2, - 1.0, - mat, - 0, - 2, - 1.0, - result, - 2 - ) + run_func(cyth.compute_kernel_matrix, "L", "T", 2, 2, 1.0, mat, 0, 2, 1.0, + result, 2) if __name__ == "__main__": diff --git a/doc/examples/cython/setup.py b/doc/examples/cython/setup.py index 56ce48058..86021ea6d 100644 --- a/doc/examples/cython/setup.py +++ b/doc/examples/cython/setup.py @@ -25,11 +25,10 @@ except ImportError as e: # noqa modules = [os.path.join(pkg_dir, module) for module in modules] setup( - name=pkg_dir, - version="0.0.1", - description="Cython examples for Ray", - packages=[pkg_dir], - ext_modules=cythonize(modules), - install_requires=install_requires, - include_dirs=include_dirs - ) + name=pkg_dir, + version="0.0.1", + description="Cython examples for Ray", + packages=[pkg_dir], + ext_modules=cythonize(modules), + install_requires=install_requires, + include_dirs=include_dirs) diff --git a/doc/examples/hyperopt/hyperopt_adaptive.py b/doc/examples/hyperopt/hyperopt_adaptive.py deleted file mode 100644 index 727fdfcb6..000000000 --- a/doc/examples/hyperopt/hyperopt_adaptive.py +++ /dev/null @@ -1,154 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -from collections import defaultdict -import numpy as np -import ray - -from tensorflow.examples.tutorials.mnist import input_data - -import objective - -parser = argparse.ArgumentParser(description="Run the hyperparameter " - "optimization example.") -parser.add_argument("--num-starting-segments", default=5, type=int, - help="The number of training segments to start in " - "parallel.") -parser.add_argument("--num-segments", default=10, type=int, - help="The number of additional training segments to " - "perform.") -parser.add_argument("--steps-per-segment", default=20, type=int, - help="The number of steps of training to do per training " - "segment.") -parser.add_argument("--redis-address", default=None, type=str, - help="The Redis address of the cluster.") - - -if __name__ == "__main__": - args = parser.parse_args() - - ray.init(redis_address=args.redis_address) - - # The number of training passes over the dataset to use for network. - steps = args.steps_per_segment - - # Load the mnist data and turn the data into remote objects. - print("Downloading the MNIST dataset. This may take a minute.") - mnist = input_data.read_data_sets("MNIST_data", one_hot=True) - train_images = ray.put(mnist.train.images) - train_labels = ray.put(mnist.train.labels) - validation_images = ray.put(mnist.validation.images) - validation_labels = ray.put(mnist.validation.labels) - - # Keep track of the accuracies that we've seen at different numbers of - # iterations. - accuracies_by_num_steps = defaultdict(lambda: []) - - # Define a method to determine if an experiment looks promising or not. 
- def is_promising(experiment_info): - accuracies = experiment_info["accuracies"] - total_num_steps = experiment_info["total_num_steps"] - comparable_accuracies = accuracies_by_num_steps[total_num_steps] - if len(comparable_accuracies) == 0: - if len(accuracies) == 1: - # This means that we haven't seen anything finish yet, so keep - # running this experiment. - return True - else: - # The experiment is promising if the second half of the - # accuracies are better than the first half of the accuracies. - return (np.mean(accuracies[:len(accuracies) // 2]) < - np.mean(accuracies[len(accuracies) // 2:])) - # Otherwise, continue running the experiment if it is in the top half - # of experiments we've seen so far at this point in time. - return np.mean(accuracy > np.array(comparable_accuracies)) > 0.5 - - # Keep track of all of the experiment segments that we're running. This - # dictionary uses the object ID of the experiment as the key. - experiment_info = {} - # Keep track of the curently running experiment IDs. - remaining_ids = [] - - # Keep track of the best hyperparameters and the best accuracy. - best_hyperparameters = None - best_accuracy = 0 - - # A function for generating random hyperparameters. - def generate_hyperparameters(): - return {"learning_rate": 10 ** np.random.uniform(-5, 5), - "batch_size": np.random.randint(1, 100), - "dropout": np.random.uniform(0, 1), - "stddev": 10 ** np.random.uniform(-5, 5)} - - # Launch some initial experiments. - for _ in range(args.num_starting_segments): - hyperparameters = generate_hyperparameters() - experiment_id = objective.train_cnn_and_compute_accuracy.remote( - hyperparameters, steps, train_images, train_labels, - validation_images, validation_labels) - experiment_info[experiment_id] = {"hyperparameters": hyperparameters, - "total_num_steps": steps, - "accuracies": []} - remaining_ids.append(experiment_id) - - for _ in range(args.num_segments): - # Wait for a segment of an experiment to finish. - ready_ids, remaining_ids = ray.wait(remaining_ids, num_returns=1) - experiment_id = ready_ids[0] - # Get the accuracy and the weights. - accuracy, weights = ray.get(experiment_id) - # Update the experiment info. - previous_info = experiment_info[experiment_id] - previous_info["accuracies"].append(accuracy) - - # Update the best accuracy and best hyperparameters. - if accuracy > best_accuracy: - best_hyperparameters = previous_info["hyperparameters"] - best_accuracy = accuracy - - if is_promising(previous_info): - # If the experiment still looks promising, then continue running - # it. - print("Continuing to run the experiment with hyperparameters {}." - .format(previous_info["hyperparameters"])) - new_hyperparameters = previous_info["hyperparameters"] - new_info = {"hyperparameters": new_hyperparameters, - "total_num_steps": (previous_info["total_num_steps"] + - steps), - "accuracies": previous_info["accuracies"][:]} - starting_weights = weights - else: - # If the experiment does not look promising, start a new - # experiment. - print("Ending the experiment with hyperparameters {}." - .format(previous_info["hyperparameters"])) - new_hyperparameters = generate_hyperparameters() - new_info = {"hyperparameters": new_hyperparameters, - "total_num_steps": steps, - "accuracies": []} - starting_weights = None - - # Start running the next segment. 
- new_experiment_id = objective.train_cnn_and_compute_accuracy.remote( - new_hyperparameters, steps, train_images, train_labels, - validation_images, validation_labels, weights=starting_weights) - experiment_info[new_experiment_id] = new_info - remaining_ids.append(new_experiment_id) - - # Update the set of all accuracies that we've seen. - accuracies_by_num_steps[previous_info["total_num_steps"]].append( - accuracy) - - # Record the best performing set of hyperparameters. - print("""Best accuracy was {:.3} with - learning_rate: {:.2} - batch_size: {} - dropout: {:.2} - stddev: {:.2} - """.format(100 * best_accuracy, - best_hyperparameters["learning_rate"], - best_hyperparameters["batch_size"], - best_hyperparameters["dropout"], - best_hyperparameters["stddev"])) diff --git a/doc/examples/hyperopt/hyperopt_simple.py b/doc/examples/hyperopt/hyperopt_simple.py deleted file mode 100644 index 1a22f7c1d..000000000 --- a/doc/examples/hyperopt/hyperopt_simple.py +++ /dev/null @@ -1,100 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import ray -import argparse - -from tensorflow.examples.tutorials.mnist import input_data - -import objective - -parser = argparse.ArgumentParser(description="Run the hyperparameter " - "optimization example.") -parser.add_argument("--trials", default=2, type=int, - help="The number of random trials to do.") -parser.add_argument("--steps", default=10, type=int, - help="The number of steps of training to do per network.") -parser.add_argument("--redis-address", default=None, type=str, - help="The Redis address of the cluster.") - - -if __name__ == "__main__": - args = parser.parse_args() - - ray.init(redis_address=args.redis_address) - - # The number of sets of random hyperparameters to try. - trials = args.trials - # The number of training passes over the dataset to use for network. - steps = args.steps - - # Load the mnist data and turn the data into remote objects. - print("Downloading the MNIST dataset. This may take a minute.") - mnist = input_data.read_data_sets("MNIST_data", one_hot=True) - train_images = ray.put(mnist.train.images) - train_labels = ray.put(mnist.train.labels) - validation_images = ray.put(mnist.validation.images) - validation_labels = ray.put(mnist.validation.labels) - - # Keep track of the best hyperparameters and the best accuracy. - best_hyperparameters = None - best_accuracy = 0 - # This list holds the object IDs for all of the experiments that we have - # launched and that have not yet been processed. - remaining_ids = [] - # This is a dictionary mapping the object ID of an experiment to the - # hyerparameters used for that experiment. - hyperparameters_mapping = {} - - # A function for generating random hyperparameters. - def generate_hyperparameters(): - return {"learning_rate": 10 ** np.random.uniform(-5, 5), - "batch_size": np.random.randint(1, 100), - "dropout": np.random.uniform(0, 1), - "stddev": 10 ** np.random.uniform(-5, 5)} - - # Randomly generate some hyperparameters, and launch a task for each set. - for i in range(trials): - hyperparameters = generate_hyperparameters() - accuracy_id = objective.train_cnn_and_compute_accuracy.remote( - hyperparameters, steps, train_images, train_labels, - validation_images, validation_labels) - remaining_ids.append(accuracy_id) - # Keep track of which hyperparameters correspond to this experiment. 
- hyperparameters_mapping[accuracy_id] = hyperparameters - - # Fetch and print the results of the tasks in the order that they complete. - for i in range(trials): - # Use ray.wait to get the object ID of the first task that completes. - ready_ids, remaining_ids = ray.wait(remaining_ids) - # Process the output of this task. - result_id = ready_ids[0] - hyperparameters = hyperparameters_mapping[result_id] - accuracy, _ = ray.get(result_id) - print("""We achieve accuracy {:.3}% with - learning_rate: {:.2} - batch_size: {} - dropout: {:.2} - stddev: {:.2} - """.format(100 * accuracy, - hyperparameters["learning_rate"], - hyperparameters["batch_size"], - hyperparameters["dropout"], - hyperparameters["stddev"])) - if accuracy > best_accuracy: - best_hyperparameters = hyperparameters - best_accuracy = accuracy - - # Record the best performing set of hyperparameters. - print("""Best accuracy over {} trials was {:.3} with - learning_rate: {:.2} - batch_size: {} - dropout: {:.2} - stddev: {:.2} - """.format(trials, 100 * best_accuracy, - best_hyperparameters["learning_rate"], - best_hyperparameters["batch_size"], - best_hyperparameters["dropout"], - best_hyperparameters["stddev"])) diff --git a/doc/examples/hyperopt/objective.py b/doc/examples/hyperopt/objective.py deleted file mode 100644 index b531bd219..000000000 --- a/doc/examples/hyperopt/objective.py +++ /dev/null @@ -1,127 +0,0 @@ -# Most of the tensorflow code is adapted from Tensorflow's tutorial on using -# CNNs to train MNIST -# https://www.tensorflow.org/versions/r0.9/tutorials/mnist/pros/index.html#build-a-multilayer-convolutional-network. # noqa: E501 - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -import ray -import ray.experimental.tf_utils - - -def get_batch(data, batch_index, batch_size): - # This method currently drops data when num_data is not divisible by - # batch_size. 
- num_data = data.shape[0] - num_batches = num_data // batch_size - batch_index %= num_batches - return data[(batch_index * batch_size):((batch_index + 1) * batch_size)] - - -def weight(shape, stddev): - initial = tf.truncated_normal(shape, stddev=stddev) - return tf.Variable(initial) - - -def bias(shape): - initial = tf.constant(0.1, shape=shape) - return tf.Variable(initial) - - -def conv2d(x, W): - return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME") - - -def max_pool_2x2(x): - return tf.nn.max_pool( - x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME") - - -def cnn_setup(x, y, keep_prob, lr, stddev): - first_hidden = 32 - second_hidden = 64 - fc_hidden = 1024 - W_conv1 = weight([5, 5, 1, first_hidden], stddev) - B_conv1 = bias([first_hidden]) - x_image = tf.reshape(x, [-1, 28, 28, 1]) - h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + B_conv1) - h_pool1 = max_pool_2x2(h_conv1) - W_conv2 = weight([5, 5, first_hidden, second_hidden], stddev) - b_conv2 = bias([second_hidden]) - h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) - h_pool2 = max_pool_2x2(h_conv2) - W_fc1 = weight([7 * 7 * second_hidden, fc_hidden], stddev) - b_fc1 = bias([fc_hidden]) - h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * second_hidden]) - h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) - h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) - W_fc2 = weight([fc_hidden, 10], stddev) - b_fc2 = bias([10]) - y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) - cross_entropy = tf.reduce_mean( - -tf.reduce_sum(y * tf.log(y_conv), reduction_indices=[1])) - correct_pred = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1)) - return (tf.train.AdamOptimizer(lr).minimize(cross_entropy), - tf.reduce_mean(tf.cast(correct_pred, tf.float32)), cross_entropy) - - -# Define a remote function that takes a set of hyperparameters as well as the -# data, consructs and trains a network, and returns the validation accuracy. -@ray.remote -def train_cnn_and_compute_accuracy(params, - steps, - train_images, - train_labels, - validation_images, - validation_labels, - weights=None): - # Extract the hyperparameters from the params dictionary. - learning_rate = params["learning_rate"] - batch_size = params["batch_size"] - keep = 1 - params["dropout"] - stddev = params["stddev"] - # Create the network and related variables. - with tf.Graph().as_default(): - # Create the input placeholders for the network. - x = tf.placeholder(tf.float32, shape=[None, 784]) - y = tf.placeholder(tf.float32, shape=[None, 10]) - keep_prob = tf.placeholder(tf.float32) - # Create the network. - train_step, accuracy, loss = cnn_setup(x, y, keep_prob, learning_rate, - stddev) - # Do the training and evaluation. - with tf.Session() as sess: - # Use the TensorFlowVariables utility. This is only necessary if we - # want to set and get the weights. - variables = ray.experimental.tf_utils.TensorFlowVariables( - loss, sess) - # Initialize the network weights. - sess.run(tf.global_variables_initializer()) - # If some network weights were passed in, set those. - if weights is not None: - variables.set_weights(weights) - # Do some steps of training. - for i in range(1, steps + 1): - # Fetch the next batch of data. - image_batch = get_batch(train_images, i, batch_size) - label_batch = get_batch(train_labels, i, batch_size) - # Do one step of training. - sess.run( - train_step, - feed_dict={ - x: image_batch, - y: label_batch, - keep_prob: keep - }) - # Training is done, so compute the validation accuracy and the - # current weights and return. 
- totalacc = accuracy.eval(feed_dict={ - x: validation_images, - y: validation_labels, - keep_prob: 1.0 - }) - new_weights = variables.get_weights() - return float(totalacc), new_weights diff --git a/doc/examples/overview.rst b/doc/examples/overview.rst new file mode 100644 index 000000000..a69e9a9db --- /dev/null +++ b/doc/examples/overview.rst @@ -0,0 +1,34 @@ +Examples Overview +================= + +.. customgalleryitem:: + :tooltip: Build a simple parameter server using Ray. + :description: :doc:`/auto_examples/plot_parameter_server` + +.. customgalleryitem:: + :tooltip: Asynchronous Advantage Actor Critic agent using Ray. + :description: :doc:`/auto_examples/plot_example-a3c` + +.. customgalleryitem:: + :tooltip: Simple parallel asynchronous hyperparameter evaluation. + :description: :doc:`/auto_examples/plot_hyperparameter` + +.. customgalleryitem:: + :tooltip: Parallelizing a policy gradient calculation on OpenAI Gym Pong. + :description: :doc:`/auto_examples/plot_pong_example` + +.. customgalleryitem:: + :tooltip: Walkthrough of parallelizing the L-BFGS algorithm. + :description: :doc:`/auto_examples/plot_lbfgs` + +.. customgalleryitem:: + :tooltip: Implementing a simple news reader using Ray. + :description: :doc:`/auto_examples/plot_newsreader` + +.. customgalleryitem:: + :tooltip: Using Ray to train ResNet across multiple GPUs. + :description: :doc:`/auto_examples/plot_resnet` + +.. customgalleryitem:: + :tooltip: Implement a simple streaming application using Ray’s actors. + :description: :doc:`/auto_examples/plot_streaming` diff --git a/doc/source/example-a3c.rst b/doc/examples/plot_example-a3c.rst similarity index 99% rename from doc/source/example-a3c.rst rename to doc/examples/plot_example-a3c.rst index 821cd7186..be620f959 100644 --- a/doc/source/example-a3c.rst +++ b/doc/examples/plot_example-a3c.rst @@ -113,7 +113,6 @@ Driver Code Walkthrough The driver manages the coordination among workers and handles updating the global model parameters. The main training script looks like the following. - .. code-block:: python import numpy as np diff --git a/doc/examples/plot_hyperparameter.py b/doc/examples/plot_hyperparameter.py new file mode 100644 index 000000000..b77da2184 --- /dev/null +++ b/doc/examples/plot_hyperparameter.py @@ -0,0 +1,178 @@ +""" +Simple Parallel Model Selection +=============================== + +In this example, we'll demonstrate how to quickly write a hyperparameter +tuning script that evaluates a set of hyperparameters in parallel. + +This script will demonstrate how to use two important parts of the Ray API: +using ``ray.remote`` to define remote functions and ``ray.wait`` to wait for +their results to be ready. + +.. important:: For a production-grade implementation of distributed + hyperparameter tuning, use `Tune`_, a scalable hyperparameter + tuning library built using Ray's Actor API. + +.. _`Tune`: https://ray.readthedocs.io/en/latest/tune.html +""" +import os +import numpy as np +from filelock import FileLock + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torchvision import datasets, transforms + +import ray + +ray.init() + +# The number of sets of random hyperparameters to try. +num_evaluations = 10 + + +# A function for generating random hyperparameters. 
+def generate_hyperparameters(): + return { + "learning_rate": 10**np.random.uniform(-5, 1), + "batch_size": np.random.randint(1, 100), + "momentum": np.random.uniform(0, 1) + } + + +def get_data_loaders(batch_size): + mnist_transforms = transforms.Compose( + [transforms.ToTensor(), + transforms.Normalize((0.1307, ), (0.3081, ))]) + + # We add FileLock here because multiple workers will want to + # download data, and this may cause overwrites since + # DataLoader is not threadsafe. + with FileLock(os.path.expanduser("~/data.lock")): + train_loader = torch.utils.data.DataLoader( + datasets.MNIST( + "~/data", + train=True, + download=True, + transform=mnist_transforms), + batch_size=batch_size, + shuffle=True) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST("~/data", train=False, transform=mnist_transforms), + batch_size=batch_size, + shuffle=True) + return train_loader, test_loader + + +class ConvNet(nn.Module): + """Simple two layer Convolutional Neural Network.""" + + def __init__(self): + super(ConvNet, self).__init__() + self.conv1 = nn.Conv2d(1, 3, kernel_size=3) + self.fc = nn.Linear(192, 10) + + def forward(self, x): + x = F.relu(F.max_pool2d(self.conv1(x), 3)) + x = x.view(-1, 192) + x = self.fc(x) + return F.log_softmax(x, dim=1) + + +def train(model, optimizer, train_loader, device=torch.device("cpu")): + """Optimize the model with one pass over the data. + + Cuts off at 1024 samples to simplify training. + """ + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + if batch_idx * len(data) > 1024: + return + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + + +def test(model, test_loader, device=torch.device("cpu")): + """Checks the validation accuracy of the model. + + Cuts off at 512 samples for simplicity. + """ + model.eval() + correct = 0 + total = 0 + with torch.no_grad(): + for batch_idx, (data, target) in enumerate(test_loader): + if batch_idx * len(data) > 512: + break + data, target = data.to(device), target.to(device) + outputs = model(data) + _, predicted = torch.max(outputs.data, 1) + total += target.size(0) + correct += (predicted == target).sum().item() + + return correct / total + + +@ray.remote +def evaluate_hyperparameters(config): + model = ConvNet() + train_loader, test_loader = get_data_loaders(config["batch_size"]) + optimizer = optim.SGD( + model.parameters(), + lr=config["learning_rate"], + momentum=config["momentum"]) + train(model, optimizer, train_loader) + return test(model, test_loader) + + +# Keep track of the best hyperparameters and the best accuracy. +best_hyperparameters = None +best_accuracy = 0 +# A list holding the object IDs for all of the experiments that we have +# launched but have not yet been processed. +remaining_ids = [] +# A dictionary mapping an experiment's object ID to its hyperparameters. +# hyerparameters used for that experiment. +hyperparameters_mapping = {} + +# Randomly generate sets of hyperparameters and launch a task to test each set. +for i in range(num_evaluations): + hyperparameters = generate_hyperparameters() + accuracy_id = evaluate_hyperparameters.remote(hyperparameters) + remaining_ids.append(accuracy_id) + hyperparameters_mapping[accuracy_id] = hyperparameters + +# Fetch and print the results of the tasks in the order that they complete. +while remaining_ids: + # Use ray.wait to get the object ID of the first task that completes. 
+ done_ids, remaining_ids = ray.wait(remaining_ids) + # There is only one return result by default. + result_id = done_ids[0] + + hyperparameters = hyperparameters_mapping[result_id] + accuracy = ray.get(result_id) + print("""We achieve accuracy {:.3}% with + learning_rate: {:.2} + batch_size: {} + momentum: {:.2} + """.format(100 * accuracy, hyperparameters["learning_rate"], + hyperparameters["batch_size"], hyperparameters["momentum"])) + if accuracy > best_accuracy: + best_hyperparameters = hyperparameters + best_accuracy = accuracy + +# Record the best performing set of hyperparameters. +print("""Best accuracy over {} trials was {:.3} with + learning_rate: {:.2} + batch_size: {} + momentum: {:.2} + """.format(num_evaluations, 100 * best_accuracy, + best_hyperparameters["learning_rate"], + best_hyperparameters["batch_size"], + best_hyperparameters["momentum"])) diff --git a/doc/source/example-lbfgs.rst b/doc/examples/plot_lbfgs.rst similarity index 100% rename from doc/source/example-lbfgs.rst rename to doc/examples/plot_lbfgs.rst diff --git a/doc/source/example-newsreader.rst b/doc/examples/plot_newsreader.rst similarity index 100% rename from doc/source/example-newsreader.rst rename to doc/examples/plot_newsreader.rst diff --git a/doc/examples/plot_parameter_server.py b/doc/examples/plot_parameter_server.py new file mode 100644 index 000000000..53d8c73b7 --- /dev/null +++ b/doc/examples/plot_parameter_server.py @@ -0,0 +1,289 @@ +""" +Parameter Server +================ + +The parameter server is a framework for distributed machine learning training. + +In the parameter server framework, a centralized server (or group of server +nodes) maintains global shared parameters of a machine-learning model +(e.g., a neural network) while the data and computation of calculating +updates (i.e., gradient descent updates) are distributed over worker nodes. + +.. image:: ../images/param_actor.png + :align: center + +Parameter servers are a core part of many machine learning applications. This +document walks through how to implement simple synchronous and asynchronous +parameter servers using Ray actors. + +To run the application, first install some dependencies. + +.. code-block:: bash + + pip install torch torchvision filelock + +Let's first define some helper functions and import some dependencies. + +""" +import os +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision import datasets, transforms +from filelock import FileLock +import numpy as np + +import ray + + +def get_data_loader(): + """Safely downloads data. Returns training/validation set dataloader.""" + mnist_transforms = transforms.Compose( + [transforms.ToTensor(), + transforms.Normalize((0.1307, ), (0.3081, ))]) + + # We add FileLock here because multiple workers will want to + # download data, and this may cause overwrites since + # DataLoader is not threadsafe. 
+ with FileLock(os.path.expanduser("~/data.lock")): + train_loader = torch.utils.data.DataLoader( + datasets.MNIST( + "~/data", + train=True, + download=True, + transform=mnist_transforms), + batch_size=128, + shuffle=True) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST("~/data", train=False, transform=mnist_transforms), + batch_size=128, + shuffle=True) + return train_loader, test_loader + + +def evaluate(model, test_loader): + """Evaluates the accuracy of the model on a validation dataset.""" + model.eval() + correct = 0 + total = 0 + with torch.no_grad(): + for batch_idx, (data, target) in enumerate(test_loader): + # This is only set to finish evaluation faster. + if batch_idx * len(data) > 1024: + break + outputs = model(data) + _, predicted = torch.max(outputs.data, 1) + total += target.size(0) + correct += (predicted == target).sum().item() + return 100. * correct / total + + +####################################################################### +# Setup: Defining the Neural Network +# ---------------------------------- +# +# We define a small neural network to use in training. We provide +# some helper functions for obtaining data, including getter/setter +# methods for gradients and weights. + + +class ConvNet(nn.Module): + """Small ConvNet for MNIST.""" + + def __init__(self): + super(ConvNet, self).__init__() + self.conv1 = nn.Conv2d(1, 3, kernel_size=3) + self.fc = nn.Linear(192, 10) + + def forward(self, x): + x = F.relu(F.max_pool2d(self.conv1(x), 3)) + x = x.view(-1, 192) + x = self.fc(x) + return F.log_softmax(x, dim=1) + + def get_weights(self): + return {k: v.cpu() for k, v in self.state_dict().items()} + + def set_weights(self, weights): + self.load_state_dict(weights) + + def get_gradients(self): + grads = [] + for p in self.parameters(): + grad = None if p.grad is None else p.grad.data.cpu().numpy() + grads.append(grad) + return grads + + def set_gradients(self, gradients): + for g, p in zip(gradients, self.parameters()): + if g is not None: + p.grad = torch.from_numpy(g) + + +########################################################################### +# Defining the Parameter Server +# ----------------------------- +# +# The parameter server will hold a copy of the model. +# During training, it will: +# +# 1. Receive gradients and apply them to its model. +# +# 2. Send the updated model back to the workers. +# +# The ``@ray.remote`` decorator defines a remote process. It wraps the +# ParameterServer class and allows users to instantiate it as a +# remote actor. + + +@ray.remote +class ParameterServer(object): + def __init__(self, lr): + self.model = ConvNet() + self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr) + + def apply_gradients(self, *gradients): + summed_gradients = [ + np.stack(gradient_zip).sum(axis=0) + for gradient_zip in zip(*gradients) + ] + self.optimizer.zero_grad() + self.model.set_gradients(summed_gradients) + self.optimizer.step() + return self.model.get_weights() + + def get_weights(self): + return self.model.get_weights() + + +########################################################################### +# Defining the Worker +# ------------------- +# The worker will also hold a copy of the model. During training. it will +# continuously evaluate data and send gradients +# to the parameter server. The worker will synchronize its model with the +# Parameter Server model weights. 
+ + +@ray.remote +class DataWorker(object): + def __init__(self): + self.model = ConvNet() + self.data_iterator = iter(get_data_loader()[0]) + + def compute_gradients(self, weights): + self.model.set_weights(weights) + try: + data, target = next(self.data_iterator) + except StopIteration: # When the epoch ends, start a new epoch. + self.data_iterator = iter(get_data_loader()[0]) + data, target = next(self.data_iterator) + self.model.zero_grad() + output = self.model(data) + loss = F.nll_loss(output, target) + loss.backward() + return self.model.get_gradients() + + +########################################################################### +# Synchronous Parameter Server Training +# ------------------------------------- +# We'll now create a synchronous parameter server training scheme. We'll first +# instantiate a process for the parameter server, along with multiple +# workers. + +iterations = 200 +num_workers = 2 + +ray.init(ignore_reinit_error=True) +ps = ParameterServer.remote(1e-2) +workers = [DataWorker.remote() for i in range(num_workers)] + +########################################################################### +# We'll also instantiate a model on the driver process to evaluate the test +# accuracy during training. + +model = ConvNet() +test_loader = get_data_loader()[1] + +########################################################################### +# Training alternates between: +# +# 1. Computing the gradients given the current weights from the server +# 2. Updating the parameter server's weights with the gradients. + +print("Running synchronous parameter server training.") +current_weights = ps.get_weights.remote() +for i in range(iterations): + gradients = [ + worker.compute_gradients.remote(current_weights) for worker in workers + ] + # Calculate update after all gradients are available. + current_weights = ps.apply_gradients.remote(*gradients) + + if i % 10 == 0: + # Evaluate the current model. + model.set_weights(ray.get(current_weights)) + accuracy = evaluate(model, test_loader) + print("Iter {}: \taccuracy is {:.1f}".format(i, accuracy)) + +print("Final accuracy is {:.1f}.".format(accuracy)) +# Clean up Ray resources and processes before the next example. +ray.shutdown() + +########################################################################### +# Asynchronous Parameter Server Training +# -------------------------------------- +# We'll now create a synchronous parameter server training scheme. We'll first +# instantiate a process for the parameter server, along with multiple +# workers. + +print("Running Asynchronous Parameter Server Training.") + +ray.init(ignore_reinit_error=True) +ps = ParameterServer.remote(1e-2) +workers = [DataWorker.remote() for i in range(num_workers)] + +########################################################################### +# Here, workers will asynchronously compute the gradients given its +# current weights and send these gradients to the parameter server as +# soon as they are ready. When the Parameter server finishes applying the +# new gradient, the server will send back a copy of the current weights to the +# worker. The worker will then update the weights and repeat. 
+ +current_weights = ps.get_weights.remote() + +gradients = {} +for worker in workers: + gradients[worker.compute_gradients.remote(current_weights)] = worker + +for i in range(iterations * num_workers): + ready_gradient_list, _ = ray.wait(list(gradients)) + ready_gradient_id = ready_gradient_list[0] + worker = gradients.pop(ready_gradient_id) + + # Compute and apply gradients. + current_weights = ps.apply_gradients.remote(*[ready_gradient_id]) + gradients[worker.compute_gradients.remote(current_weights)] = worker + + if i % 10 == 0: + # Evaluate the current model after every 10 updates. + model.set_weights(ray.get(current_weights)) + accuracy = evaluate(model, test_loader) + print("Iter {}: \taccuracy is {:.1f}".format(i, accuracy)) + +print("Final accuracy is {:.1f}.".format(accuracy)) + +############################################################################## +# Final Thoughts +# -------------- +# +# This approach is powerful because it enables you to implement a parameter +# server with a few lines of code as part of a Python application. +# As a result, this simplifies the deployment of applications that use +# parameter servers and to modify the behavior of the parameter server. +# +# For example, sharding the parameter server, changing the update rule, +# switch between asynchronous and synchronous updates, ignoring +# straggler workers, or any number of other customizations, +# will only require a few extra lines of code. diff --git a/doc/examples/plot_pong_example.py b/doc/examples/plot_pong_example.py new file mode 100644 index 000000000..9b0ef73dd --- /dev/null +++ b/doc/examples/plot_pong_example.py @@ -0,0 +1,293 @@ +# flake8: noqa +""" +Learning to Play Pong +===================== + +In this example, we'll train a **very simple** neural network to play Pong using +the OpenAI Gym. + +At a high level, we will use multiple Ray actors to obtain simulation rollouts +and calculate gradient simultaneously. We will then centralize these +gradients and update the neural network. The updated neural network will +then be passed back to each Ray actor for more gradient calculation. + +This application is adapted, with minimal modifications, from +Andrej Karpathy's `source code`_ (see the accompanying `blog post`_). + +To run the application, first install some dependencies. + +.. code-block:: bash + + pip install gym[atari] + +At the moment, on a large machine with 64 physical cores, computing an update +with a batch of size 1 takes about 1 second, a batch of size 10 takes about 2.5 +seconds. A batch of size 60 takes about 3 seconds. On a cluster with 11 nodes, +each with 18 physical cores, a batch of size 300 takes about 10 seconds. If the +numbers you see differ from these by much, take a look at the +**Troubleshooting** section at the bottom of this page and consider `submitting +an issue`_. + +.. _`source code`: https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5 +.. _`blog post`: http://karpathy.github.io/2016/05/31/rl/ +.. _`submitting an issue`: https://github.com/ray-project/ray/issues + +**Note** that these times depend on how long the rollouts take, which in turn +depends on how well the policy is doing. For example, a really bad policy will +lose very quickly. As the policy learns, we should expect these numbers to +increase. 
+""" +import numpy as np +import os +import ray +import time + +import gym + +############################################################################## +# Hyperparameters +# --------------- +# +# Here we'll define a couple of the hyperparameters that are used. + +H = 200 # The number of hidden layer neurons. +gamma = 0.99 # The discount factor for reward. +decay_rate = 0.99 # The decay factor for RMSProp leaky sum of grad^2. +D = 80 * 80 # The input dimensionality: 80x80 grid. +learning_rate = 1e-4 # Magnitude of the update. + +############################################################################# +# Helper Functions +# ---------------- +# +# We first define a few helper functions: +# +# 1. Preprocessing: The ``preprocess`` function will +# preprocess the original 210x160x3 uint8 frame into a one-dimensional 6400 +# float vector. +# +# 2. Reward Processing: The ``process_rewards`` function will calculate +# a discounted reward. This formula states that the "value" of a +# sampled action is the weighted sum of all rewards afterwards, +# but later rewards are exponentially less important. +# +# 3. Rollout: The ``rollout`` function plays an entire game of Pong (until +# either the computer or the RL agent loses). + + +def preprocess(img): + # Crop the image. + img = img[35:195] + # Downsample by factor of 2. + img = img[::2, ::2, 0] + # Erase background (background type 1). + img[img == 144] = 0 + # Erase background (background type 2). + img[img == 109] = 0 + # Set everything else (paddles, ball) to 1. + img[img != 0] = 1 + return img.astype(np.float).ravel() + + +def process_rewards(r): + """Compute discounted reward from a vector of rewards.""" + discounted_r = np.zeros_like(r) + running_add = 0 + for t in reversed(range(0, r.size)): + # Reset the sum, since this was a game boundary (pong specific!). + if r[t] != 0: + running_add = 0 + running_add = running_add * gamma + r[t] + discounted_r[t] = running_add + return discounted_r + + +def rollout(model, env): + """Evaluates env and model until the env returns "Done". + + Returns: + xs: A list of observations + hs: A list of model hidden states per observation + dlogps: A list of gradients + drs: A list of rewards. + + """ + # Reset the game. + observation = env.reset() + # Note that prev_x is used in computing the difference frame. + prev_x = None + xs, hs, dlogps, drs = [], [], [], [] + done = False + while not done: + cur_x = preprocess(observation) + x = cur_x - prev_x if prev_x is not None else np.zeros(D) + prev_x = cur_x + + aprob, h = model.policy_forward(x) + # Sample an action. + action = 2 if np.random.uniform() < aprob else 3 + + # The observation. + xs.append(x) + # The hidden state. + hs.append(h) + y = 1 if action == 2 else 0 # A "fake label". + # The gradient that encourages the action that was taken to be + # taken (see http://cs231n.github.io/neural-networks-2/#losses if + # confused). + dlogps.append(y - aprob) + + observation, reward, done, info = env.step(action) + + # Record reward (has to be done after we call step() to get reward + # for previous action). + drs.append(reward) + return xs, hs, dlogps, drs + + +############################################################################## +# Neural Network +# -------------- +# Here, a neural network is used to define a "policy" +# for playing Pong (that is, a function that chooses an action given a state). 
+# +# To implement a neural network in NumPy, we need to provide helper functions +# for calculating updates and computing the output of the neural network +# given an input, which in our case is an observation. + + +class Model(): + """This class holds the neural network weights.""" + + def __init__(self): + self.weights = {} + self.weights["W1"] = np.random.randn(H, D) / np.sqrt(D) + self.weights["W2"] = np.random.randn(H) / np.sqrt(H) + + def policy_forward(self, x): + h = np.dot(self.weights["W1"], x) + h[h < 0] = 0 # ReLU nonlinearity. + logp = np.dot(self.weights["W2"], h) + # Softmax + p = 1.0 / (1.0 + np.exp(-logp)) + # Return probability of taking action 2, and hidden state. + return p, h + + def policy_backward(self, eph, epx, epdlogp): + """Backward pass to calculate gradients. + + Arguments: + eph: Array of intermediate hidden states. + epx: Array of experiences (observations. + epdlogp: Array of logps (output of last layer before softmax/ + + """ + dW2 = np.dot(eph.T, epdlogp).ravel() + dh = np.outer(epdlogp, self.weights["W2"]) + # Backprop relu. + dh[eph <= 0] = 0 + dW1 = np.dot(dh.T, epx) + return {"W1": dW1, "W2": dW2} + + def update(self, grad_buffer, rmsprop_cache, lr, decay): + """Applies the gradients to the model parameters with RMSProp.""" + for k, v in self.weights.items(): + g = grad_buffer[k] + rmsprop_cache[k] = (decay * rmsprop_cache[k] + (1 - decay) * g**2) + self.weights[k] += lr * g / (np.sqrt(rmsprop_cache[k]) + 1e-5) + + +def zero_grads(grad_buffer): + """Reset the batch gradient buffer.""" + for k, v in grad_buffer.items(): + grad_buffer[k] = np.zeros_like(v) + + +############################################################################# +# Parallelizing Gradients +# ----------------------- +# We define an **actor**, which is responsible for taking a model and an env +# and performing a rollout + computing a gradient update. + +ray.init() + + +@ray.remote +class RolloutWorker(object): + def __init__(self): + # Tell numpy to only use one core. If we don't do this, each actor may + # try to use all of the cores and the resulting contention may result + # in no speedup over the serial version. Note that if numpy is using + # OpenBLAS, then you need to set OPENBLAS_NUM_THREADS=1, and you + # probably need to do it from the command line (so it happens before + # numpy is imported). + os.environ["MKL_NUM_THREADS"] = "1" + self.env = gym.make("Pong-v0") + + def compute_gradient(self, model): + # Compute a simulation episode. + xs, hs, dlogps, drs = rollout(model, self.env) + reward_sum = sum(drs) + # Vectorize the arrays. + epx = np.vstack(xs) + eph = np.vstack(hs) + epdlogp = np.vstack(dlogps) + epr = np.vstack(drs) + + # Compute the discounted reward backward through time. + discounted_epr = process_rewards(epr) + # Standardize the rewards to be unit normal (helps control the gradient + # estimator variance). + discounted_epr -= np.mean(discounted_epr) + discounted_epr /= np.std(discounted_epr) + # Modulate the gradient with advantage (the policy gradient magic + # happens right here). + epdlogp *= discounted_epr + return model.policy_backward(eph, epx, epdlogp), reward_sum + + +############################################################################# +# Running +# ------- +# +# This example is easy to parallelize because the network can play ten games +# in parallel and no information needs to be shared between the games. +# +# In the loop, the network repeatedly plays games of Pong and +# records a gradient from each game. 
Every ten games, the gradients are +# combined together and used to update the network. + +iterations = 20 +batch_size = 4 +model = Model() +actors = [RolloutWorker.remote() for _ in range(batch_size)] + +running_reward = None +# "Xavier" initialization. +# Update buffers that add up gradients over a batch. +grad_buffer = {k: np.zeros_like(v) for k, v in model.weights.items()} +# Update the rmsprop memory. +rmsprop_cache = {k: np.zeros_like(v) for k, v in model.weights.items()} + +for i in range(1, 1 + iterations): + model_id = ray.put(model) + gradient_ids = [] + # Launch tasks to compute gradients from multiple rollouts in parallel. + start_time = time.time() + gradient_ids = [ + actor.compute_gradient.remote(model_id) for actor in actors + ] + for batch in range(batch_size): + [grad_id], gradient_ids = ray.wait(gradient_ids) + grad, reward_sum = ray.get(grad_id) + # Accumulate the gradient over batch. + for k in model.weights: + grad_buffer[k] += grad[k] + running_reward = (reward_sum if running_reward is None else + running_reward * 0.99 + reward_sum * 0.01) + end_time = time.time() + print("Batch {} computed {} rollouts in {} seconds, " + "running mean is {}".format(i, batch_size, end_time - start_time, + running_reward)) + model.update(grad_buffer, rmsprop_cache, learning_rate, decay_rate) + zero_grads(grad_buffer) diff --git a/doc/source/example-resnet.rst b/doc/examples/plot_resnet.rst similarity index 100% rename from doc/source/example-resnet.rst rename to doc/examples/plot_resnet.rst diff --git a/doc/source/example-streaming.rst b/doc/examples/plot_streaming.rst similarity index 100% rename from doc/source/example-streaming.rst rename to doc/examples/plot_streaming.rst diff --git a/doc/examples/resnet/cifar_input.py b/doc/examples/resnet/cifar_input.py index d19466561..283a66a27 100644 --- a/doc/examples/resnet/cifar_input.py +++ b/doc/examples/resnet/cifar_input.py @@ -34,8 +34,8 @@ def build_data(data_path, size, dataset): def load_transform(value): # Convert these examples to dense labels and processed images. record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes]) - label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), - tf.int32) + label = tf.cast( + tf.slice(record, [label_offset], [label_bytes]), tf.int32) # Convert from string to [depth * height * width] to # [depth, height, width]. depth_major = tf.reshape( @@ -44,10 +44,11 @@ def build_data(data_path, size, dataset): # Convert from [depth, height, width] to [height, width, depth]. image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32) return (image, label) + # Read examples from files in the filename queue. 
data_files = tf.gfile.Glob(data_path) - data = tf.contrib.data.FixedLengthRecordDataset(data_files, - record_bytes=record_bytes) + data = tf.contrib.data.FixedLengthRecordDataset( + data_files, record_bytes=record_bytes) data = data.map(load_transform) data = data.batch(size) iterator = data.make_one_shot_iterator() @@ -102,8 +103,7 @@ def build_input(data, batch_size, dataset, train): labels = tf.reshape(labels, [batch_size, 1]) indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1]) labels = tf.sparse_to_dense( - tf.concat([indices, labels], 1), - [batch_size, num_classes], 1.0, 0.0) + tf.concat([indices, labels], 1), [batch_size, num_classes], 1.0, 0.0) assert len(images.get_shape()) == 4 assert images.get_shape()[0] == batch_size diff --git a/doc/examples/rl_pong/driver.py b/doc/examples/rl_pong/driver.py deleted file mode 100644 index ab9a63446..000000000 --- a/doc/examples/rl_pong/driver.py +++ /dev/null @@ -1,213 +0,0 @@ -# This code is copied and adapted from Andrej Karpathy's code for learning to -# play Pong https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import numpy as np -import os -import ray -import time - -import gym - -# Define some hyperparameters. - -# The number of hidden layer neurons. -H = 200 -learning_rate = 1e-4 -# Discount factor for reward. -gamma = 0.99 -# The decay factor for RMSProp leaky sum of grad^2. -decay_rate = 0.99 - -# The input dimensionality: 80x80 grid. -D = 80 * 80 - - -def sigmoid(x): - # Sigmoid "squashing" function to interval [0, 1]. - return 1.0 / (1.0 + np.exp(-x)) - - -def preprocess(img): - """Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector.""" - # Crop the image. - img = img[35:195] - # Downsample by factor of 2. - img = img[::2, ::2, 0] - # Erase background (background type 1). - img[img == 144] = 0 - # Erase background (background type 2). - img[img == 109] = 0 - # Set everything else (paddles, ball) to 1. - img[img != 0] = 1 - return img.astype(np.float).ravel() - - -def discount_rewards(r): - """take 1D float array of rewards and compute discounted reward""" - discounted_r = np.zeros_like(r) - running_add = 0 - for t in reversed(range(0, r.size)): - # Reset the sum, since this was a game boundary (pong specific!). - if r[t] != 0: - running_add = 0 - running_add = running_add * gamma + r[t] - discounted_r[t] = running_add - return discounted_r - - -def policy_forward(x, model): - h = np.dot(model["W1"], x) - h[h < 0] = 0 # ReLU nonlinearity. - logp = np.dot(model["W2"], h) - p = sigmoid(logp) - # Return probability of taking action 2, and hidden state. - return p, h - - -def policy_backward(eph, epx, epdlogp, model): - """backward pass. (eph is array of intermediate hidden states)""" - dW2 = np.dot(eph.T, epdlogp).ravel() - dh = np.outer(epdlogp, model["W2"]) - # Backprop relu. - dh[eph <= 0] = 0 - dW1 = np.dot(dh.T, epx) - return {"W1": dW1, "W2": dW2} - - -@ray.remote -class PongEnv(object): - def __init__(self): - # Tell numpy to only use one core. If we don't do this, each actor may - # try to use all of the cores and the resulting contention may result - # in no speedup over the serial version. Note that if numpy is using - # OpenBLAS, then you need to set OPENBLAS_NUM_THREADS=1, and you - # probably need to do it from the command line (so it happens before - # numpy is imported). 
- os.environ["MKL_NUM_THREADS"] = "1" - self.env = gym.make("Pong-v0") - - def compute_gradient(self, model): - # Reset the game. - observation = self.env.reset() - # Note that prev_x is used in computing the difference frame. - prev_x = None - xs, hs, dlogps, drs = [], [], [], [] - reward_sum = 0 - done = False - while not done: - cur_x = preprocess(observation) - x = cur_x - prev_x if prev_x is not None else np.zeros(D) - prev_x = cur_x - - aprob, h = policy_forward(x, model) - # Sample an action. - action = 2 if np.random.uniform() < aprob else 3 - - # The observation. - xs.append(x) - # The hidden state. - hs.append(h) - y = 1 if action == 2 else 0 # A "fake label". - # The gradient that encourages the action that was taken to be - # taken (see http://cs231n.github.io/neural-networks-2/#losses if - # confused). - dlogps.append(y - aprob) - - observation, reward, done, info = self.env.step(action) - reward_sum += reward - - # Record reward (has to be done after we call step() to get reward - # for previous action). - drs.append(reward) - - epx = np.vstack(xs) - eph = np.vstack(hs) - epdlogp = np.vstack(dlogps) - epr = np.vstack(drs) - # Reset the array memory. - xs, hs, dlogps, drs = [], [], [], [] - - # Compute the discounted reward backward through time. - discounted_epr = discount_rewards(epr) - # Standardize the rewards to be unit normal (helps control the gradient - # estimator variance). - discounted_epr -= np.mean(discounted_epr) - discounted_epr /= np.std(discounted_epr) - # Modulate the gradient with advantage (the policy gradient magic - # happens right here). - epdlogp *= discounted_epr - return policy_backward(eph, epx, epdlogp, model), reward_sum - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Train an RL agent on Pong.") - parser.add_argument( - "--batch-size", - default=10, - type=int, - help="The number of rollouts to do per batch.") - parser.add_argument( - "--redis-address", - default=None, - type=str, - help="The Redis address of the cluster.") - parser.add_argument( - "--iterations", - default=-1, - type=int, - help="The number of model updates to perform. By " - "default, training will not terminate.") - args = parser.parse_args() - batch_size = args.batch_size - - ray.init(redis_address=args.redis_address) - - # Run the reinforcement learning. - - running_reward = None - batch_num = 1 - model = {} - # "Xavier" initialization. - model["W1"] = np.random.randn(H, D) / np.sqrt(D) - model["W2"] = np.random.randn(H) / np.sqrt(H) - # Update buffers that add up gradients over a batch. - grad_buffer = {k: np.zeros_like(v) for k, v in model.items()} - # Update the rmsprop memory. - rmsprop_cache = {k: np.zeros_like(v) for k, v in model.items()} - actors = [PongEnv.remote() for _ in range(batch_size)] - iteration = 0 - while iteration != args.iterations: - iteration += 1 - model_id = ray.put(model) - actions = [] - # Launch tasks to compute gradients from multiple rollouts in parallel. - start_time = time.time() - for i in range(batch_size): - action_id = actors[i].compute_gradient.remote(model_id) - actions.append(action_id) - for i in range(batch_size): - action_id, actions = ray.wait(actions) - grad, reward_sum = ray.get(action_id[0]) - # Accumulate the gradient over batch. 
- for k in model: - grad_buffer[k] += grad[k] - running_reward = (reward_sum if running_reward is None else - running_reward * 0.99 + reward_sum * 0.01) - end_time = time.time() - print("Batch {} computed {} rollouts in {} seconds, " - "running mean is {}".format(batch_num, batch_size, - end_time - start_time, - running_reward)) - for k, v in model.items(): - g = grad_buffer[k] - rmsprop_cache[k] = ( - decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g**2) - model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5) - # Reset the batch gradient buffer. - grad_buffer[k] = np.zeros_like(v) - batch_num += 1 diff --git a/doc/source/_static/img/thumbnails/default.png b/doc/source/_static/img/thumbnails/default.png new file mode 100644 index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc GIT binary patch literal 26786 zcmdRV^;2BU^L2u2U~zXzaCgrl!EKS?!Gl|HcbDK2+!l9tx8Uwhki}V?FVCmmf8zb& zR^6I=Yo=zVr~8~U-QmiL(eC?7t2K>sEKRQ>SbW8C{gfQ0bg;;5VB{^5g#S%F4jS*yo)sIw9jjnBhR8wuiv*@haoMbsRi`-;s) z9j%)m&qag`YmLl;-VH-wNFilp)`}=RE6iLb8P1ZXot7lPS$|;j zQ6%{N?}NV#OoGh&8zR;Jmp{}RVNldC0&_43DEv|>!G?j^q;OxAyX7Gs_2A9hI-55o zJ)F=U@-pAz+w*?0hK)p%z(s{qJ1Q+Wv_8Z|z3!m1Cs=&-?)J_5Y815AaDH?$jYLqo(p$kO2iW|9P%S0_{dC|2h=c{Q!&bPfpfv0!@c%J z#_Pjh7|{hTI0kIgMg zxdOWkb%vZV^4lVL61EU=I9L*Tl+?h$RKCIz*L82w5j+y@957yB-O`t_fpVnvVkw>7 zP^nvW>L7(+HJXphZ(3gXtZxLHZqdPVywxY{9!|2K&AEn|@;JMhAZj7?4y>5SAxJrx zurYaog3=|Jtznf^wPDLtdAwz{-~VzM&<1f=cKIarDeaAWbp~AUPnA+mO6+`%gWKCS zLp?PEPTuCPmyhdAo0>J$4)AJv7RQniq@AMH$$_|;sq({O!`Uktc6UT#*dpB361N~I z%}5Y5rnnv`0pPZ?4^O==rd+Ct+d=y*e@vt1VbH@M#e;)=cJ$F$(v2B^Vh2r+Vx~*+Z!G1Zgmq&GRTTsGq2_{@Nw>D#DWRt=`t`6c z7T8oA;ZHP!1QLB9M_mrUm>*io2P8vwgJhjCQtU#iZyN}MKEQUSOs}$gCgi)#1GuGMe*f#+Z&G1I+vYi6K9g)e*Db>=_;17hfr#kK~YJsMnHz>ujQG zarIZk$|9PqN6mLW?3l6YSILh*)(}TG6B!@=Y&v9B#NjQ@cyJA{AT*Rq{1RO3KfRt& zITYih)&2p&c2`~JyxOI7rZ8o2AVPcMJ0duaT+CTY?{Jzelx=^QZxo4TA}5e|0FnJx z2h9h%+q97bj6^`YgpWV zZE^bI*Om;wM(XMc%-aGKa}>axr_VZ=;zDWt8O2-J^Bxv}Ut2%&^MW1SJF|SbFz&kG zy&cWX=>0)&urGzaisqDY!NnC_{E{vDTOHp*`@+VJID&^uD425yvR;&V7JOge@>7l;gt``i}FMDWyMVa`YI-%kD!E>S> zL|JS2ysS@b3@)Qb=BIHHjmnEDA|!p~!={`J0RJ#+a8dEKKlk|iw9a|2w`u3eeq7J2 zf-BAe-~bY*&0(nx$VAm(#iNTWBKx>hl>cQ(d|miS=*dXB;Oo=bVeiP)u35s(Z6STU zHpM@WqxIr<<7Z_^I!@Fq-s0HMekIN5ZdZuA8p<$z5M*`7l{e(44IGp#EYwEGo9l@@ z6GbbJ5wt%PcTH`mE9=xg_LPL9p}aIOXebdn84NVw&M*s5qmw zSAf$>HX1)|Ed8?UGO(gY6zH<4agAW*RQ-Ux7>@~#J zmS$%j>>%DF4Y=98ql77up`%Kpu;DPvUrv*EhV?47yMlP0yl6hzZ|fFAuTWA614#IsaO$_k zcURoeoZI7P`+2*MMC}ycnKbq zdhu&vPwa4VxmvM1`j?s0O}oTP&vV-WMtq+90RkmnkO&!Yvc#TgvQ`BFLbdrz?S;T~ zehN^!jvpxcNekpZ#eH3Pq#LsoY`b@>QN$aAV{~(&#mAEtd`K+bsZ4IGv=%(zq*3Xs z;3Rfjc zkktoX6uL9`be86_LQik`_Ya-b3(;-vd_U$lnA*wemDJ|?HO9AdUpJdGrMmL_VXdOB z53f;FLk`&=fS`S&PG^o`)InjY@-!!OH&@QVDqhZ(i_7rANfL{LH&pQ<#6nd=k@R&QX0zW~~Wq*_`b9rNZYq=4x|Qbh_5fW+|0jZXKA2Gglg!KYS4dpoJq9281$ ztI5PC9ZkFni;lbsSRI+`dV_<2vI#7bicpH+{d`{I&a(w2bj5n;~YRsn$5{c$bbM zLp`gi<}PqEx5=fu{@$`b8|&0pik;)ojkp*SGu9J-aY?~8d}E6YnF?L^Q*N$;h)+)M zM%+AaC)QY@X5SI#N78RF95e}jrx?i{u%sUH( z&F-em&Q+v^3Hfvted*?*6eORtd44he9$m_2uf_-u6H^xFve=)EF>h`gg3$ic6_)ea zQjmCUv-pK|(_$10b)X&)yZeaX`^%g0HIP?RAgl7^kiyoT z2Sc+M8OsBmIm<3h^&9K>>%hL~oMI^=-xk0PY9Hx^Ewq?q#4JliEENe7 zE(%!vQ~SSRB+U}ICT{;Xs%G|W-``$ zTF?jz$B)r(#XFeWV2$xsxW0HgSK7HZ@UUu-{kkE@Lky5%Pt6>X5`q95F(?F5Eu70V z!Y>OcUe9E%E7;u4;(y!~BRy<>z1^!Gdg6;?irc1Kk2vwYfn1HrnS{p1m`s{7WIG?q zAmwgD`j59UQbjRcnPP5pEhm5`vOhBW)AUYPuYPW_hIU&}F7HbX3fy+AX{6>soT1g# zD2<)GielaDxB>of?4Ec<(TnLceijD3Ze7h0suV%Imdj$OeLqMG6_R z2~|YF_uo{q{8~vyb`SpCf4k4BOkZ)C9Z#YMq^*{p-nuS;<;qOG_UAM1&feCCiddBx 
diff --git a/doc/source/_static/img/thumbnails/default.png b/doc/source/_static/img/thumbnails/default.png
new file mode 100644
index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc
GIT binary patch
literal 26786
[binary image data omitted]
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 63a971653..b7f706054 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -12,10 +12,13 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.

+import glob
+import shutil
 import sys
 import os
 import urllib
-import shlex
+sys.path.insert(0, os.path.abspath('.'))
+from custom_directives import CustomGalleryItemDirective

 # These lines added to enable Sphinx to work without installing Ray.
 import mock
@@ -67,13 +70,33 @@ sys.path.insert(0, os.path.abspath("../../python/"))
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx.ext.viewcode',
-    'sphinx.ext.napoleon',
-    'sphinx_click.ext',
-    'sphinx-jsonschema',
+    'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon',
+    'sphinx_click.ext', 'sphinx-jsonschema', 'sphinx_gallery.gen_gallery'
 ]

+sphinx_gallery_conf = {
+    "examples_dirs": ["../examples"],  # path to example scripts
+    "gallery_dirs": ["auto_examples"],  # path where to save generated examples
+    "ignore_pattern": "../examples/doc_code/",
+    "plot_gallery": "False",
+    # "filename_pattern": "tutorial.py",
+    "backreferences_dir": False
+    # "show_memory": False,
+    # "min_reported_time": False
+}
+
+for i in range(len(sphinx_gallery_conf["examples_dirs"])):
+    gallery_dir = sphinx_gallery_conf["gallery_dirs"][i]
+    source_dir = sphinx_gallery_conf["examples_dirs"][i]
+    try:
+        os.mkdir(gallery_dir)
+    except OSError:
+        pass
+
+    # Copy rst files from source dir to gallery dir.
+    for f in glob.glob(os.path.join(source_dir, '*.rst')):
+        shutil.copy(f, gallery_dir)
+
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']

@@ -95,7 +118,7 @@ master_doc = 'index'

 # General information about the project.
project = u'Ray' -copyright = u'2016, The Ray Team' +copyright = u'2019, The Ray Team' author = u'The Ray Team' # The version info for the project you're documenting, acts as replacement for @@ -123,6 +146,8 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] +exclude_patterns += sphinx_gallery_conf['examples_dirs'] +exclude_patterns += ["*/README.rst"] # The reST default role (used for this markup: `text`) to use for all # documents. @@ -354,5 +379,10 @@ def update_context(app, pagename, templatename, context, doctree): pagename) +# see also http://searchvoidstar.tumblr.com/post/125486358368/making-pdfs-from-markdown-on-readthedocsorg-using + + def setup(app): app.connect('html-page-context', update_context) + # Custom directives + app.add_directive('customgalleryitem', CustomGalleryItemDirective) diff --git a/doc/source/custom_directives.py b/doc/source/custom_directives.py new file mode 100644 index 000000000..4d7e84778 --- /dev/null +++ b/doc/source/custom_directives.py @@ -0,0 +1,94 @@ +# Originally from: +# github.com/pytorch/tutorials/blob/60d6ef365e36f3ba82c2b61bf32cc40ac4e86c7b/custom_directives.py # noqa +from docutils.parsers.rst import Directive, directives +from docutils.statemachine import StringList +from docutils import nodes +import os +import sphinx_gallery + +try: + FileNotFoundError +except NameError: + FileNotFoundError = IOError + +GALLERY_TEMPLATE = """ +.. raw:: html + +
+    <div class="sphx-glr-thumbcontainer" tooltip="{tooltip}">
+
+.. only:: html
+
+    .. figure:: {thumbnail}
+
+        {description}
+
+.. raw:: html
+
+    </div>
+""" + + +class CustomGalleryItemDirective(Directive): + """Create a sphinx gallery style thumbnail. + + tooltip and figure are self explanatory. Description could be a link to + a document like in below example. + + Example usage: + + .. customgalleryitem:: + :tooltip: I am writing this tutorial to focus specifically on NLP. + :figure: /_static/img/thumbnails/babel.jpg + :description: :doc:`/beginner/deep_learning_nlp_tutorial` + + If figure is specified, a thumbnail will be made out of it and stored in + _static/thumbs. Therefore, consider _static/thumbs as a "built" directory. + """ + + required_arguments = 0 + optional_arguments = 0 + final_argument_whitespace = True + option_spec = { + "tooltip": directives.unchanged, + "figure": directives.unchanged, + "description": directives.unchanged + } + + has_content = False + add_index = False + + def run(self): + # Cutoff the `tooltip` after 195 chars. + if "tooltip" in self.options: + tooltip = self.options["tooltip"] + if len(self.options["tooltip"]) > 195: + tooltip = tooltip[:195] + "..." + else: + raise ValueError("Need to provide :tooltip: under " + "`.. customgalleryitem::`.") + + # Generate `thumbnail` used in the gallery. + if "figure" in self.options: + env = self.state.document.settings.env + rel_figname, figname = env.relfn2path(self.options["figure"]) + thumbnail = os.path.join("_static/thumbs/", + os.path.basename(figname)) + + os.makedirs("_static/thumbs", exist_ok=True) + + sphinx_gallery.gen_rst.scale_image(figname, thumbnail, 400, 280) + else: + thumbnail = "/_static/img/thumbnails/default.png" + + if "description" in self.options: + description = self.options["description"] + else: + raise ValueError("Need to provide :description: under " + "`customgalleryitem::`.") + + thumbnail_rst = GALLERY_TEMPLATE.format( + tooltip=tooltip, thumbnail=thumbnail, description=description) + thumbnail = StringList(thumbnail_rst.split("\n")) + thumb = nodes.paragraph() + self.state.nested_parse(thumbnail, self.content_offset, thumb) + return [thumb] diff --git a/doc/source/example-parameter-server.rst b/doc/source/example-parameter-server.rst deleted file mode 100644 index bed1fce33..000000000 --- a/doc/source/example-parameter-server.rst +++ /dev/null @@ -1,127 +0,0 @@ -Parameter Server -================ - -This document walks through how to implement simple synchronous and asynchronous -parameter servers using actors. To run the application, first install some -dependencies. - -.. code-block:: bash - - pip install tensorflow - -You can view the `code for this example`_. - -.. _`code for this example`: https://github.com/ray-project/ray/tree/master/doc/examples/parameter_server - -The examples can be run as follows. - -.. code-block:: bash - - # Run the asynchronous parameter server. - python ray/doc/examples/parameter_server/async_parameter_server.py --num-workers=4 - - # Run the synchronous parameter server. - python ray/doc/examples/parameter_server/sync_parameter_server.py --num-workers=4 - -Note that this examples uses distributed actor handles, which are still -considered experimental. - -Asynchronous Parameter Server ------------------------------ - -The asynchronous parameter server itself is implemented as an actor, which -exposes the methods ``push`` and ``pull``. - -.. 
-
-    @ray.remote
-    class ParameterServer(object):
-        def __init__(self, keys, values):
-            values = [value.copy() for value in values]
-            self.weights = dict(zip(keys, values))
-
-        def push(self, keys, values):
-            for key, value in zip(keys, values):
-                self.weights[key] += value
-
-        def pull(self, keys):
-            return [self.weights[key] for key in keys]
-
-We then define a worker task, which takes a parameter server as an argument and
-submits tasks to it. The structure of the code looks as follows.
-
-.. code-block:: python
-
-    @ray.remote
-    def worker_task(ps):
-        while True:
-            # Get the latest weights from the parameter server.
-            weights = ray.get(ps.pull.remote(keys))
-
-            # Compute an update.
-            ...
-
-            # Push the update to the parameter server.
-            ps.push.remote(keys, update)
-
-Then we can create a parameter server and initiate training as follows.
-
-.. code-block:: python
-
-    ps = ParameterServer.remote(keys, initial_values)
-    worker_tasks = [worker_task.remote(ps) for _ in range(4)]
-
-Synchronous Parameter Server
-----------------------------
-
-The parameter server is implemented as an actor, which exposes the
-methods ``apply_gradients`` and ``get_weights``. A constant linear scaling
-rule is applied by scaling the learning rate by the number of workers.
-
-.. code-block:: python
-
-    @ray.remote
-    class ParameterServer(object):
-        def __init__(self, learning_rate):
-            self.net = model.SimpleCNN(learning_rate=learning_rate)
-
-        def apply_gradients(self, *gradients):
-            self.net.apply_gradients(np.mean(gradients, axis=0))
-            return self.net.variables.get_flat()
-
-        def get_weights(self):
-            return self.net.variables.get_flat()
-
-
-Workers are actors which expose the method ``compute_gradients``.
-
-.. code-block:: python
-
-    @ray.remote
-    class Worker(object):
-        def __init__(self, worker_index, batch_size=50):
-            self.worker_index = worker_index
-            self.batch_size = batch_size
-            self.mnist = input_data.read_data_sets("MNIST_data", one_hot=True,
-                                                   seed=worker_index)
-            self.net = model.SimpleCNN()
-
-        def compute_gradients(self, weights):
-            self.net.variables.set_flat(weights)
-            xs, ys = self.mnist.train.next_batch(self.batch_size)
-            return self.net.compute_gradients(xs, ys)
-
-Training alternates between computing the gradients given the current weights
-from the parameter server and updating the parameter server's weights with the
-resulting gradients.
-
-.. code-block:: python
-
-    while True:
-        gradients = [worker.compute_gradients.remote(current_weights)
-                     for worker in workers]
-        current_weights = ps.apply_gradients.remote(*gradients)
-
-Both of these examples implement the parameter server using a single actor;
-however, they can be easily extended to **split the parameters across multiple
-actors**.
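Since the multi-actor extension is only asserted above, here is a rough sketch of one way to do it for the asynchronous key-value server. The inlined ``ParameterServer`` repeats the definition from the example; the two-shard, round-robin key layout is an assumption of this sketch, not part of the original code.

.. code-block:: python

    import numpy as np
    import ray

    ray.init()

    @ray.remote
    class ParameterServer(object):
        # Same key-value server as in the asynchronous example above.
        def __init__(self, keys, values):
            self.weights = dict(zip(keys, [value.copy() for value in values]))

        def push(self, keys, values):
            for key, value in zip(keys, values):
                self.weights[key] += value

        def pull(self, keys):
            return [self.weights[key] for key in keys]

    # Assign keys to shards round-robin and start one actor per shard.
    keys = ["w{}".format(i) for i in range(4)]
    values = [np.zeros(10) for _ in keys]
    num_shards = 2
    shard_keys = [keys[i::num_shards] for i in range(num_shards)]
    shard_values = [values[i::num_shards] for i in range(num_shards)]
    shards = [ParameterServer.remote(ks, vs)
              for ks, vs in zip(shard_keys, shard_values)]

    # A full pull now queries every shard in parallel and flattens the result.
    blocks = ray.get([s.pull.remote(ks) for s, ks in zip(shards, shard_keys)])
    weights = [w for block in blocks for w in block]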
diff --git a/doc/source/example-rl-pong.rst b/doc/source/example-rl-pong.rst
deleted file mode 100644
index dee2ddd85..000000000
--- a/doc/source/example-rl-pong.rst
+++ /dev/null
@@ -1,118 +0,0 @@
-Learning to Play Pong
-=====================
-
-In this example, we'll train a **very simple** neural network to play Pong using
-the OpenAI Gym. This application is adapted, with minimal modifications, from
-Andrej Karpathy's `code`_ (see the accompanying `blog post`_).
-
-You can view the `code for this example`_.
-
-To run the application, first install some dependencies.
-
-.. code-block:: bash
-
-    pip install gym[atari]
-
-Then you can run the example as follows.
-
-.. code-block:: bash
-
-    python ray/doc/examples/rl_pong/driver.py --batch-size=10
-
-To run the example on a cluster, simply pass in the flag
-``--address=<address>``.
-
-At the moment, on a large machine with 64 physical cores, computing an update
-with a batch of size 1 takes about 1 second, a batch of size 10 takes about 2.5
-seconds, and a batch of size 60 takes about 3 seconds. On a cluster with 11
-nodes, each with 18 physical cores, a batch of size 300 takes about 10 seconds.
-If the numbers you see differ from these by much, take a look at the
-**Troubleshooting** section at the bottom of this page and consider `submitting
-an issue`_.
-
-.. _`code`: https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
-.. _`blog post`: http://karpathy.github.io/2016/05/31/rl/
-.. _`code for this example`: https://github.com/ray-project/ray/tree/master/doc/examples/rl_pong
-.. _`submitting an issue`: https://github.com/ray-project/ray/issues
-
-**Note** that these times depend on how long the rollouts take, which in turn
-depends on how well the policy is doing. For example, a really bad policy will
-lose very quickly. As the policy learns, we should expect these numbers to
-increase.
-
-The distributed version
------------------------
-
-At the core of Andrej's `code`_, a neural network is used to define a "policy"
-for playing Pong (that is, a function that chooses an action given a state). In
-the loop, the network repeatedly plays games of Pong and records a gradient from
-each game. Every ten games, the gradients are combined and used to update the
-network.
-
-This example is easy to parallelize because the network can play ten games in
-parallel and no information needs to be shared between the games.
-
-We define an **actor** for the Pong environment, which includes a method for
-performing a rollout and computing a gradient update. Below is pseudocode for
-the actor.
-
-.. code-block:: python
-
-    @ray.remote
-    class PongEnv(object):
-        def __init__(self):
-            # Tell numpy to only use one core. If we don't do this, each actor may try
-            # to use all of the cores and the resulting contention may result in no
-            # speedup over the serial version. Note that if numpy is using OpenBLAS,
-            # then you need to set OPENBLAS_NUM_THREADS=1, and you probably need to do
-            # it from the command line (so it happens before numpy is imported).
-            os.environ["MKL_NUM_THREADS"] = "1"
-            self.env = gym.make("Pong-v0")
-
-        def compute_gradient(self, model):
-            # Reset the game.
-            observation = self.env.reset()
-            while not done:
-                # Choose an action using policy_forward.
-                # Take the action and observe the new state of the world.
-            # Compute a gradient using policy_backward. Return the gradient and reward.
-            return [gradient, reward_sum]
-
-We then create a number of actors, so that we can perform rollouts in parallel.
-
-.. code-block:: python
-
-    actors = [PongEnv.remote() for _ in range(batch_size)]
-
-Calling this remote method inside a for loop, we launch multiple tasks to
-perform rollouts and compute gradients in parallel.
-
-.. code-block:: python
-
-    model_id = ray.put(model)
-    actions = []
-    # Launch tasks to compute gradients from multiple rollouts in parallel.
-    for i in range(batch_size):
-        action_id = actors[i].compute_gradient.remote(model_id)
-        actions.append(action_id)
-
-
-Troubleshooting
----------------
-
-If you are not seeing any speedup from Ray (and assuming you're using a
-multicore machine), the problem may be that numpy is trying to use multiple
-threads. When many processes are each trying to use multiple threads, the result
-is often no speedup. When running this example, try opening up ``top`` and
-seeing if some Python processes are using more than 100% CPU. If yes, then this
-is likely the problem.
-
-The example tries to set ``MKL_NUM_THREADS=1`` in the actor. However, that only
-works if the numpy on your machine is actually using MKL. If it's using
-OpenBLAS, then you'll need to set ``OPENBLAS_NUM_THREADS=1``. In fact, you may
-have to do this **before** running the script (it may need to happen before
-numpy is imported).
-
-.. code-block:: bash
-
-    export OPENBLAS_NUM_THREADS=1
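If exporting the variable is awkward, the same limits can be set from inside the script, provided it happens before numpy is first imported. This is a small sketch using the standard MKL and OpenBLAS environment variables rather than anything Ray-specific.

.. code-block:: python

    import os

    # Must run before numpy is imported anywhere in this process.
    os.environ["MKL_NUM_THREADS"] = "1"
    os.environ["OPENBLAS_NUM_THREADS"] = "1"

    import numpy as np  # noqa: E402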
diff --git a/doc/source/images/param_actor.png b/doc/source/images/param_actor.png
new file mode 100644
index 0000000000000000000000000000000000000000..e43a6a004ffd63fd0727a2f99f9b4d8f6a947e37
GIT binary patch
literal 19644
[binary image data omitted]
ztRpXDQX~Sf=d=^!$nY_8uekSI6B5A_Ng#F5$7b}wFzZ6a6(2oDbjOPcLKvj*@p$cI zKeAYf41on#zED6ucU02fXCiGFeTeGt=&aezorvn`GkV0x=ZfwRWkdu&qF3GvKC1-X zL)wO=e6-YF+KE*2+vCWP;RmwHQ_3ZlP277Tg>;2}jpdx2h;%62&vil+y*sbF4MTI# zmnLly&MsD*Y*g(31hDYl-glcQTWy1)*F^=0ZNZT%U(y7pxFvMG|Gxl#3xM=bJTS6v z?#jN5uuCo4Zd;wRaPMOtQ&BVc{>u_W)nWExyC&_R6_abe78xj_ ztVB78<9pCR7w*67HWAbT7&IzF$N}6}{lheofnX{G9t5ZVgW32U zfRE4k?baWCOSC+}P@D@O28QC5tRx(Z@8pApk^Sv~TH(=^*-vU(;YIP=%%Ux^P>UwVi5?6AsVfzyl!w7`4N55I#4a?2a%04?b=6an=9&tKgn3V(Cy9PxkP8~6iW zK~X?cd?z0)jHd01hO{m%+P2lE9yE)#u)1&l{x?NA1ML7VWFZ1DXvRad0lo0MJNY+L zz>irdzJs+%AP(Ar-Z&qQd-#Fr;`#A^(30E>L|dF&4q!&ow!#}VSXiI}2y+Zk+1hC4X2xR$z#=o2W zWAXjo&8buLBsT$+3ADv$91DR5FNkK~1(b!r4TCX(IA~73V>3GFr$t-P-K^^Z2mlB) zy@#A(0K@m74`>Xl62}m51JKDR0qt=-XoKS*^8j_wmq3a@4EzI_0NkJx1mc*vnn_#e zy%ufPwP${6)^$NU=qG3i8qtiLj17DbD;eKG-|-*Jitroc5&tDnrq38~fiJ`h(g|4d zI0n}Ne+WNztxYfu576_om+r6>#Is#Xx+dOgHSK{QxFE7vMu90me2p}PBC8O`5Ci0X zE+RUCHGPKikN^-^<1_w;5tdvocm+6_xiY<`CF2P#+Ln9ccbG+6C^(*v1Q-J#o)usK zdg48-IJq*&X+oea_lXm20XQ_R$9JG9XiGGv@8#z;gSL%TDVmmeLuK*-vuH~og24=c zf$t!z0B15v2yAJgT}~4mC-;FvmWj6H0;6>dSd}y*!he+KHH)?-`Ek{nc?^xNL9=K} zIt=|M;{q}ay3lNgjv?0w86TL*D1G2G(*~wsfRM{Fexunrexvi63E*d6X}A=(biT_{ z5T7$=&h%$`3(uSNP+$}hl>Aosj1J2j-$7YO=+2-`V2+`Xvq2o7||3J3iyW& zNtS>(;%1-TS~yFKwz*l4%;X)KQGUM8?j6nlDBgqx{!pS{I10*bFIVb~Y z)Ic2aO8kMPh~wmZ!vA5N(=l=%yWwj&t8y-2BXrHWJsaFbX3>^pN3Q?mIswf=djf4( z%p{wTL&&2Vh=bNJ4si@3!1i?*IRT>qh2Q&XCH(|53L z-T^$}*@zM@m= z%M*{AMO*S|Q)nQJ1GzDR4tWD6fVTJz?hv(T6!~WyOYT!(2!4aB7V;|xce4Qe&(Bt# zPfl3a6qXP})B*sIgTZD@`;{xqV4iZih~F_{!VgI+^~$(#=ndM)7k8EU5Ary1c^o=;0iNLh1Zbo$^6MJaUwpE3BHQ)w zYJX^px;FLYNpF}zTZ+h0<~=B9A0&5r`495Xqz|Ag&Iv35rjVSPnP>7sU&(t(@e5nS zn(PCGNK0P4qGv_c5KpbuPPIjNR#j2LxfDQ*w%W8s=v!9WlG;FNTV0&Er+01cN1V3Wv_(vP+|mVJ+oaylnKS3c9M_Vk z_G~FQ%bE09;hWt(?oTVc3D3JT9{HDGX}e^}{O@~mWBZ@p=DEn}`WbC^ZE%0Uz z%7SkPOWU->ML#Mjh#PpOxA0R=*UxCXV_n{8UPkQBQ2eyjMe#plhRdIy*r#-T0{vF<&rO#~a$U23cGNO@D@CmY+$%S%n+)I^(@iUF0ZCd=o z|15SdJGgme_Pfzc+YKF=qo6~6#J;hj7B` zt|>|UOPM#m8C4z4q-|biYzwTMy^nc6j0W27+UWiW&+E-yT4QEr8?5@GytuB~(!}FW zZ!7#X8fg3YdiNOg6sg~4(iXGP3UB;|`tqc=pWf!Z91XPHzAo=9GdK<<#VyRUJR{TS z6#ZP+l5V&a+Uin=5AG`d)aljDuw~SN9fbn6y{airJY4FDTjR-HX8D$4u$c0*V%Jp_ z#lP5ImwJZL_EIF!7E|z+>f{kT7eF;Db;}`Y#JKchTyVc z$Bh6i@Oj1Zv?2VCeFSlQdtK_wg?UR~t}9F2(@>tYc6nK1F&1Ci>r;LfO-uo85kbHd zQax(X^a1XwzVaRPlC4CXBFpPUY>kgw9&Sq zB6&LGs=hR-479B&N!$n8LjL5m?P^MYOO3YdxWCA03wZ}^kt~MILO@&S1>AViM%%XP z^ciin$+65RsRV87%aaa(wxE-owyT=c-&Uh7b{vw^wz)d_AZS}v6#oQD?t-?Zo~8H0 djpqc~{y(LquR+Ol+x-9l002ovPDHLkV1o2L69oVO literal 0 HcmV?d00001 diff --git a/doc/source/index.rst b/doc/source/index.rst index c5252da45..d9e1a5736 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -254,13 +254,15 @@ Getting Involved :maxdepth: -1 :caption: Examples - example-rl-pong.rst - example-parameter-server.rst - example-newsreader.rst - example-resnet.rst - example-a3c.rst - example-lbfgs.rst - example-streaming.rst + auto_examples/overview.rst + auto_examples/plot_lbfgs.rst + auto_examples/plot_newsreader.rst + auto_examples/plot_hyperparameter.rst + auto_examples/plot_pong_example.rst + auto_examples/plot_resnet.rst + auto_examples/plot_streaming.rst + auto_examples/plot_parameter_server.rst + auto_examples/plot_example-a3c.rst using-ray-with-tensorflow.rst using-ray-with-pytorch.rst