# ray/rllib/tests/test_optimizers.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gym
import numpy as np
import time
import unittest

import ray
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy
from ray.rllib.evaluation import SampleBatch
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.optimizers import AsyncGradientsOptimizer, AsyncSamplesOptimizer
from ray.rllib.optimizers.aso_tree_aggregator import TreeAggregator
from ray.rllib.tests.mock_worker import _MockWorker
from ray.rllib.utils import try_import_tf

tf = try_import_tf()


class AsyncOptimizerTest(unittest.TestCase):
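    """Tests for AsyncGradientsOptimizer using mock rollout workers."""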
    def tearDown(self):
        ray.shutdown()

    def testBasic(self):
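        """After ten async gradient steps, the mock weights should be zero."""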
        ray.init(num_cpus=4, object_store_memory=1000 * 1024 * 1024)
        local = _MockWorker()
        remotes = ray.remote(_MockWorker)
        remote_workers = [remotes.remote() for i in range(5)]
        workers = WorkerSet._from_existing(local, remote_workers)
        test_optimizer = AsyncGradientsOptimizer(workers, grads_per_step=10)
        test_optimizer.step()
        self.assertTrue(all(local.get_weights() == 0))


class PPOCollectTest(unittest.TestCase):
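    """Checks how many environment steps PPO samples per training call."""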
    def tearDown(self):
        ray.shutdown()

    def testPPOSampleWaste(self):
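        """PPO should not sample far more steps than its train batch needs."""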
        ray.init(num_cpus=4, object_store_memory=1000 * 1024 * 1024)

        # Check we at least collect the initial wave of samples
        ppo = PPOTrainer(
            env="CartPole-v0",
            config={
                "sample_batch_size": 200,
                "train_batch_size": 128,
                "num_workers": 3,
            })
        ppo.train()
        self.assertEqual(ppo.optimizer.num_steps_sampled, 600)
        ppo.stop()

        # Check we collect at least the specified amount of samples
        ppo = PPOTrainer(
            env="CartPole-v0",
            config={
                "sample_batch_size": 200,
                "train_batch_size": 900,
                "num_workers": 3,
            })
        ppo.train()
        self.assertEqual(ppo.optimizer.num_steps_sampled, 1000)
        ppo.stop()

        # Check in vectorized mode
        ppo = PPOTrainer(
            env="CartPole-v0",
            config={
                "sample_batch_size": 200,
                "num_envs_per_worker": 2,
                "train_batch_size": 900,
                "num_workers": 3,
            })
        ppo.train()
        self.assertEqual(ppo.optimizer.num_steps_sampled, 1200)
        ppo.stop()


class SampleBatchTest(unittest.TestCase):
    def testConcat(self):
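        """concat() and concat_samples() should concatenate each column."""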
        b1 = SampleBatch({"a": np.array([1, 2, 3]), "b": np.array([4, 5, 6])})
        b2 = SampleBatch({"a": np.array([1]), "b": np.array([4])})
        b3 = SampleBatch({"a": np.array([1]), "b": np.array([5])})

        b12 = b1.concat(b2)
        self.assertEqual(b12["a"].tolist(), [1, 2, 3, 1])
        self.assertEqual(b12["b"].tolist(), [4, 5, 6, 4])

        b = SampleBatch.concat_samples([b1, b2, b3])
        self.assertEqual(b["a"].tolist(), [1, 2, 3, 1, 1])
        self.assertEqual(b["b"].tolist(), [4, 5, 6, 4, 5])


class AsyncSamplesOptimizerTest(unittest.TestCase):
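    """Tests for AsyncSamplesOptimizer using CartPole rollout workers."""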
    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    @classmethod
    def setUpClass(cls):
        ray.init(num_cpus=8, object_store_memory=1000 * 1024 * 1024)

    def testSimple(self):
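        """The optimizer should reach the step targets with default options."""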
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(workers)
        self._wait_for(optimizer, 1000, 1000)

    def testMultiGPU(self):
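        """The optimizer should also run with a single fake GPU configured."""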
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers, num_gpus=1, _fake_gpus=True)
        self._wait_for(optimizer, 1000, 1000)

    def testMultiGPUParallelLoad(self):
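        """Fake GPU mode with an explicit data loader buffer should also work."""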
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers, num_gpus=1, num_data_loader_buffers=1, _fake_gpus=True)
        self._wait_for(optimizer, 1000, 1000)

    def testMultiplePasses(self):
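        """With num_sgd_iter=10, trained steps far exceed sampled steps."""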
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers,
            minibatch_buffer_size=10,
            num_sgd_iter=10,
            sample_batch_size=10,
            train_batch_size=50)
        self._wait_for(optimizer, 1000, 10000)
        self.assertLess(optimizer.stats()["num_steps_sampled"], 5000)
        self.assertGreater(optimizer.stats()["num_steps_trained"], 8000)

    def testReplay(self):
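        """A high replay proportion should yield a replay ratio above 0.7."""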
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers,
            replay_buffer_num_slots=100,
            replay_proportion=10,
            sample_batch_size=10,
            train_batch_size=10,
        )
        self._wait_for(optimizer, 1000, 1000)
        stats = optimizer.stats()
        self.assertLess(stats["num_steps_sampled"], 5000)
        replay_ratio = stats["num_steps_replayed"] / stats["num_steps_sampled"]
        self.assertGreater(replay_ratio, 0.7)
        self.assertLess(stats["num_steps_trained"], stats["num_steps_sampled"])

    def testReplayAndMultiplePasses(self):
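        """Replay combined with multiple SGD passes keeps the expected ratios."""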
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers,
            minibatch_buffer_size=10,
            num_sgd_iter=10,
            replay_buffer_num_slots=100,
            replay_proportion=10,
            sample_batch_size=10,
            train_batch_size=10)
        self._wait_for(optimizer, 1000, 1000)
        stats = optimizer.stats()
        print(stats)
        self.assertLess(stats["num_steps_sampled"], 5000)
        replay_ratio = stats["num_steps_replayed"] / stats["num_steps_sampled"]
        train_ratio = stats["num_steps_sampled"] / stats["num_steps_trained"]
        self.assertGreater(replay_ratio, 0.7)
        self.assertLess(train_ratio, 0.4)

    def testMultiTierAggregationBadConf(self):
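        """Initializing the tree aggregator with a bad config raises an error."""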
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        aggregators = TreeAggregator.precreate_aggregators(4)
        optimizer = AsyncSamplesOptimizer(workers, num_aggregation_workers=4)
        self.assertRaises(ValueError,
                          lambda: optimizer.aggregator.init(aggregators))

    def testMultiTierAggregation(self):
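        """Sampling should work through a single tree aggregation worker."""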
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        aggregators = TreeAggregator.precreate_aggregators(1)
        optimizer = AsyncSamplesOptimizer(workers, num_aggregation_workers=1)
        optimizer.aggregator.init(aggregators)
        self._wait_for(optimizer, 1000, 1000)

    def testRejectBadConfigs(self):
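        """Invalid buffer settings raise; varied batch size combos still run."""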
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        self.assertRaises(
            ValueError, lambda: AsyncSamplesOptimizer(
                workers,
                num_data_loader_buffers=2, minibatch_buffer_size=4))

        optimizer = AsyncSamplesOptimizer(
            workers,
            num_gpus=1,
            train_batch_size=100,
            sample_batch_size=50,
            _fake_gpus=True)
        self._wait_for(optimizer, 1000, 1000)

        optimizer = AsyncSamplesOptimizer(
            workers,
            num_gpus=1,
            train_batch_size=100,
            sample_batch_size=25,
            _fake_gpus=True)
        self._wait_for(optimizer, 1000, 1000)

        optimizer = AsyncSamplesOptimizer(
            workers,
            num_gpus=1,
            train_batch_size=100,
            sample_batch_size=74,
            _fake_gpus=True)
        self._wait_for(optimizer, 1000, 1000)

    def testLearnerQueueTimeout(self):
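        """A one-second learner queue timeout should surface as an error."""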
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers,
            sample_batch_size=1000,
            train_batch_size=1000,
            learner_queue_timeout=1)
        self.assertRaises(AssertionError,
                          lambda: self._wait_for(optimizer, 1000, 1000))

    def _make_envs(self):
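        """Returns a local and a single remote CartPole PPO rollout worker."""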
        def make_sess():
            return tf.Session(config=tf.ConfigProto(device_count={"CPU": 2}))

        local = RolloutWorker(
            env_creator=lambda _: gym.make("CartPole-v0"),
            policy=PPOTFPolicy,
            tf_session_creator=make_sess)
        remotes = [
            RolloutWorker.as_remote().remote(
                env_creator=lambda _: gym.make("CartPole-v0"),
                policy=PPOTFPolicy,
                tf_session_creator=make_sess)
        ]
        return local, remotes

    def _wait_for(self, optimizer, num_steps_sampled, num_steps_trained):
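        """Steps the optimizer until both step counts are exceeded.

        Raises an AssertionError if the targets are not hit within 30 seconds.
        """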
        start = time.time()
        while time.time() - start < 30:
            optimizer.step()
            if optimizer.num_steps_sampled > num_steps_sampled and \
                    optimizer.num_steps_trained > num_steps_trained:
                print("OK", optimizer.stats())
                return
        raise AssertionError("TIMED OUT", optimizer.stats())


if __name__ == "__main__":
    unittest.main(verbosity=2)