import pytest
import unittest

import ray
import ray.rllib.algorithms.apex_ddpg.apex_ddpg as apex_ddpg
from ray.rllib.utils.test_utils import (
    check,
    check_compute_single_action,
    check_train_results,
    framework_iterator,
)


class TestApexDDPG(unittest.TestCase):
    def setUp(self):
        ray.init(num_cpus=4)

    def tearDown(self):
        ray.shutdown()

    def test_apex_ddpg_compilation_and_per_worker_epsilon_values(self):
        """Test whether APEX-DDPG can be built on all frameworks."""
        config = (
            apex_ddpg.ApexDDPGConfig()
            .rollouts(num_rollout_workers=2)
            .reporting(min_sample_timesteps_per_iteration=100)
            .training(
                num_steps_sampled_before_learning_starts=0,
                optimizer={"num_replay_buffer_shards": 1},
            )
            .environment(env="Pendulum-v1")
        )
        num_iterations = 1
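
        # Run the compilation test once per supported framework.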
        for _ in framework_iterator(config, with_eager_tracing=True):
            trainer = config.build()

            # Test per-worker scale distribution.
            infos = trainer.workers.foreach_policy(
                lambda p, _: p.get_exploration_state()
            )
            scale = [i["cur_scale"] for i in infos]
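            # Remote worker i (1-indexed) should explore with noise scale
            # 0.4 ** (1 + i / (num_workers - 1) * 7); the local worker
            # (index 0) uses scale 0.0.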
            expected = [
                0.4 ** (1 + (i + 1) / float(config.num_workers - 1) * 7)
                for i in range(config.num_workers)
            ]
            check(scale, [0.0] + expected)
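
            # Train for `num_iterations` iterations and validate the
            # structure of the returned results dicts.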
            for _ in range(num_iterations):
                results = trainer.train()
                check_train_results(results)
                print(results)
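            # Sanity-check single-action inference on the built trainer.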
            check_compute_single_action(trainer)

            # Test again per-worker scale distribution
            # (should not have changed).
            infos = trainer.workers.foreach_policy(
                lambda p, _: p.get_exploration_state()
            )
            scale = [i["cur_scale"] for i in infos]
            check(scale, [0.0] + expected)
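
            # Release the trainer's resources before the next framework run.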
            trainer.stop()


if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", __file__]))