import numpy as np
import sys
import unittest

import ray
import ray.rllib.algorithms.a2c as a2c
import ray.rllib.algorithms.a3c as a3c
import ray.rllib.algorithms.ddpg as ddpg
import ray.rllib.algorithms.ddpg.td3 as td3
import ray.rllib.algorithms.dqn as dqn
import ray.rllib.agents.impala as impala
import ray.rllib.algorithms.pg as pg
import ray.rllib.agents.ppo as ppo
import ray.rllib.algorithms.sac as sac
from ray.rllib.utils import check, framework_iterator
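
# NOTE: `check` asserts (near-)equality between two values (or, with
# `false=True`, that they differ); `framework_iterator` yields once per
# configured DL framework (e.g. "tf", "tf2", "torch"), adjusting the config
# in place so the same assertions run under each framework.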


def do_test_explorations(
    run, env, config, dummy_obs, prev_a=None, expected_mean_action=None
):
    """Calls an Agent's `compute_single_action` with different `explore` options."""
    core_config = config.copy()
    if run not in [a3c.A3C]:
        core_config["num_workers"] = 0

    # Test all frameworks.
    for _ in framework_iterator(core_config):
        print("Agent={}".format(run))

        # Test for both the default Agent's exploration AND the `Random`
        # exploration class.
        for exploration in [None, "Random"]:
            local_config = core_config.copy()
            if exploration == "Random":
                # TODO(sven): Random doesn't work for IMPALA yet.
                if run is impala.ImpalaTrainer:
                    continue
                local_config["exploration_config"] = {"type": "Random"}
            print("exploration={}".format(exploration or "default"))

            trainer = run(config=local_config, env=env)

            # Make sure all actions drawn are the same, given the same
            # observations (behavior must be deterministic with
            # `explore=False`).
            actions = []
            for _ in range(25):
                actions.append(
                    trainer.compute_single_action(
                        observation=dummy_obs,
                        explore=False,
                        prev_action=prev_a,
                        prev_reward=1.0 if prev_a is not None else None,
                    )
                )
                check(actions[-1], actions[0])

            # Make sure actions drawn are different
            # (around some mean value), given constant observations.
            actions = []
            for _ in range(500):
                actions.append(
                    trainer.compute_single_action(
                        observation=dummy_obs,
                        explore=True,
                        prev_action=prev_a,
                        prev_reward=1.0 if prev_a is not None else None,
                    )
                )
            check(
                np.mean(actions),
                expected_mean_action if expected_mean_action is not None else 0.5,
                atol=0.4,
            )
            # Check that the stddev is not 0.0 (values differ).
            check(np.std(actions), 0.0, false=True)
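
# A minimal sketch (comments only, not executed) of the `exploration_config`
# override exercised above. `Random` and `compute_single_action` are real
# RLlib names; the PG-on-CartPole combination and the `obs` placeholder are
# assumed purely for illustration:
#
#     config = pg.DEFAULT_CONFIG.copy()
#     config["exploration_config"] = {"type": "Random"}
#     trainer = pg.PGTrainer(config=config, env="CartPole-v0")
#     # With explore=True, actions are now drawn uniformly from the space.
#     action = trainer.compute_single_action(observation=obs, explore=True)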


class TestExplorations(unittest.TestCase):
    """
    Tests all Exploration components and the deterministic flag for
    compute_single_action calls.
    """

    @classmethod
    def setUpClass(cls):
        ray.init(num_cpus=4)

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def test_a2c(self):
        do_test_explorations(
            a2c.A2C,
            "CartPole-v0",
            a2c.A2C_DEFAULT_CONFIG,
            np.array([0.0, 0.1, 0.0, 0.0]),
            prev_a=np.array(1),
        )

    def test_a3c(self):
        do_test_explorations(
            a3c.A3C,
            "CartPole-v0",
            a3c.DEFAULT_CONFIG,
            np.array([0.0, 0.1, 0.0, 0.0]),
            prev_a=np.array(1),
        )

    def test_ddpg(self):
        # Switch off random timesteps at beginning (the warm-up phase in
        # which GaussianNoise-based exploration emits purely random actions).
        # We want to test actual GaussianNoise right away.
        config = ddpg.DEFAULT_CONFIG.copy()
        config["exploration_config"]["random_timesteps"] = 0
        do_test_explorations(
            ddpg.DDPGTrainer,
            "Pendulum-v1",
            config,
            np.array([0.0, 0.1, 0.0]),
            expected_mean_action=0.0,
        )

    def test_simple_dqn(self):
        do_test_explorations(
            dqn.SimpleQTrainer,
            "CartPole-v0",
            dqn.SIMPLE_Q_DEFAULT_CONFIG,
            np.array([0.0, 0.1, 0.0, 0.0]),
        )

    def test_dqn(self):
        do_test_explorations(
            dqn.DQNTrainer,
            "CartPole-v0",
            dqn.DEFAULT_CONFIG,
            np.array([0.0, 0.1, 0.0, 0.0]),
        )

    def test_impala(self):
        do_test_explorations(
            impala.ImpalaTrainer,
            "CartPole-v0",
            dict(impala.DEFAULT_CONFIG.copy(), num_gpus=0),
            np.array([0.0, 0.1, 0.0, 0.0]),
            prev_a=np.array(0),
        )

    def test_pg(self):
        do_test_explorations(
            pg.PGTrainer,
            "CartPole-v0",
            pg.DEFAULT_CONFIG,
            np.array([0.0, 0.1, 0.0, 0.0]),
            prev_a=np.array(1),
        )

    def test_ppo_discr(self):
        do_test_explorations(
            ppo.PPOTrainer,
            "CartPole-v0",
            ppo.DEFAULT_CONFIG,
            np.array([0.0, 0.1, 0.0, 0.0]),
            prev_a=np.array(0),
        )

    def test_ppo_cont(self):
        do_test_explorations(
            ppo.PPOTrainer,
            "Pendulum-v1",
            ppo.DEFAULT_CONFIG,
            np.array([0.0, 0.1, 0.0]),
            prev_a=np.array([0.0]),
            expected_mean_action=0.0,
        )

    def test_sac(self):
        do_test_explorations(
            sac.SACTrainer,
            "Pendulum-v1",
            sac.DEFAULT_CONFIG,
            np.array([0.0, 0.1, 0.0]),
            expected_mean_action=0.0,
        )

    def test_td3(self):
        config = td3.TD3_DEFAULT_CONFIG.copy()
        # Switch off random timesteps at beginning. We want to test actual
        # GaussianNoise right away.
        config["exploration_config"]["random_timesteps"] = 0
        do_test_explorations(
            td3.TD3Trainer,
            "Pendulum-v1",
            config,
            np.array([0.0, 0.1, 0.0]),
            expected_mean_action=0.0,
        )


if __name__ == "__main__":
    import pytest

    sys.exit(pytest.main(["-v", __file__]))
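
# Running this file directly invokes pytest on it (see the __main__ block
# above). A single case can also be selected via pytest's node-id syntax,
# e.g. (file path assumed):
#     pytest -v test_explorations.py::TestExplorations::test_ppo_discr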