From 9af8dc568acf296864af434ffd863e88fd175cce Mon Sep 17 00:00:00 2001
From: shane
Date: Wed, 22 Nov 2017 10:20:04 -0800
Subject: [PATCH] testing with --rm and docker run (#1240)

Add --rm to docker run for Jenkins tests.
---
 test/jenkins_tests/run_multi_node_tests.sh | 36 +++++++++++-----------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/test/jenkins_tests/run_multi_node_tests.sh b/test/jenkins_tests/run_multi_node_tests.sh
index e3454a90b..cfeb11492 100755
--- a/test/jenkins_tests/run_multi_node_tests.sh
+++ b/test/jenkins_tests/run_multi_node_tests.sh
@@ -43,105 +43,105 @@ python $ROOT_DIR/multi_node_docker_test.py \
 
 # Test that the example applications run.
 
-# docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+# docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
 #     python /ray/examples/lbfgs/driver.py
 
-# docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+# docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
 #     python /ray/examples/rl_pong/driver.py \
 #     --iterations=3
 
-# docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+# docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
 #     python /ray/examples/hyperopt/hyperopt_simple.py
 
-# docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+# docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
 #     python /ray/examples/hyperopt/hyperopt_adaptive.py
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env PongDeterministic-v0 \
     --run A3C \
     --stop '{"training_iteration": 2}' \
     --config '{"num_workers": 16}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env CartPole-v1 \
     --run PPO \
     --stop '{"training_iteration": 2}' \
     --config '{"kl_coeff": 1.0, "num_sgd_iter": 10, "sgd_stepsize": 1e-4, "sgd_batchsize": 64, "timesteps_per_batch": 2000, "num_workers": 1, "model": {"free_log_std": true}}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
    python /ray/python/ray/rllib/train.py \
     --env CartPole-v1 \
     --run PPO \
     --stop '{"training_iteration": 2}' \
     --config '{"kl_coeff": 1.0, "num_sgd_iter": 10, "sgd_stepsize": 1e-4, "sgd_batchsize": 64, "timesteps_per_batch": 2000, "num_workers": 1, "use_gae": false}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env Pendulum-v0 \
     --run ES \
     --stop '{"training_iteration": 2}' \
     --config '{"stepsize": 0.01, "episodes_per_batch": 20, "timesteps_per_batch": 100}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env Pong-v0 \
     --run ES \
     --stop '{"training_iteration": 2}' \
     --config '{"stepsize": 0.01, "episodes_per_batch": 20, "timesteps_per_batch": 100}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env CartPole-v0 \
     --run A3C \
     --stop '{"training_iteration": 2}' \
     --config '{"use_lstm": false}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env CartPole-v0 \
     --run DQN \
     --stop '{"training_iteration": 2}' \
     --config '{"lr": 1e-3, "schedule_max_timesteps": 100000, "exploration_fraction": 0.1, "exploration_final_eps": 0.02, "dueling": false, "hiddens": [], "model": {"fcnet_hiddens": [64], "fcnet_activation": "relu"}}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env FrozenLake-v0 \
     --run DQN \
     --stop '{"training_iteration": 2}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env FrozenLake-v0 \
     --run PPO \
     --stop '{"training_iteration": 2}' \
     --config '{"num_sgd_iter": 10, "sgd_batchsize": 64, "timesteps_per_batch": 1000, "num_workers": 1}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env PongDeterministic-v4 \
     --run DQN \
     --stop '{"training_iteration": 2}' \
     --config '{"lr": 1e-4, "schedule_max_timesteps": 2000000, "buffer_size": 10000, "exploration_fraction": 0.1, "exploration_final_eps": 0.01, "sample_batch_size": 4, "learning_starts": 10000, "target_network_update_freq": 1000, "gamma": 0.99, "prioritized_replay": true}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env MontezumaRevenge-v0 \
     --run PPO \
     --stop '{"training_iteration": 2}' \
     --config '{"kl_coeff": 1.0, "num_sgd_iter": 10, "sgd_stepsize": 1e-4, "sgd_batchsize": 64, "timesteps_per_batch": 2000, "num_workers": 1, "model": {"dim": 40, "conv_filters": [[16, [8, 8], 4], [32, [4, 4], 2], [512, [5, 5], 1]]}, "extra_frameskip": 4}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/train.py \
     --env PongDeterministic-v4 \
     --run A3C \
     --stop '{"training_iteration": 2}' \
     --config '{"num_workers": 2, "use_lstm": false, "use_pytorch": true, "model": {"grayscale": true, "zero_mean": false, "dim": 80, "channel_major": true}}'
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/rllib/test/test_checkpoint_restore.py
 
-docker run --shm-size=10G --memory=10G $DOCKER_SHA \
+docker run --rm --shm-size=10G --memory=10G $DOCKER_SHA \
     python /ray/python/ray/tune/examples/tune_mnist_ray.py \
     --fast