ray/rllib/tests/multiagent_pendulum.py
Sven 60d4d5e1aa Remove future imports (#6724)
* Remove all __future__ imports from RLlib.

* Remove (object) again from tf_run_builder.py::TFRunBuilder.

* Fix 2 LINT warnings.

* Fix broken appo_policy import (must be appo_tf_policy)

* Remove future imports from all other ray files (not just RLlib).

* Remove future imports from all other ray files (not just RLlib).

* Remove future import blocks that contain `unicode_literals` as well.
Revert appo_tf_policy.py to appo_policy.py (belongs to another PR).

* Add two empty lines before Schedule class.

* Put back __future__ imports into determine_tests_to_run.py. Fails otherwise on a py2/print related error.
2020-01-09 00:15:48 -08:00

38 lines
1.3 KiB
Python

"""Integration test: (1) pendulum works, (2) single-agent multi-agent works."""
import ray
from ray.rllib.tests.test_multi_agent_env import make_multiagent
from ray.tune import run_experiments
from ray.tune.registry import register_env
if __name__ == "__main__":
    ray.init()

    # Wrap the single-agent Pendulum env in the multi-agent adapter and
    # register it with Tune; `MultiPendulum(1)` creates a multi-agent env
    # containing exactly one Pendulum agent, so this doubles as a check
    # that the single-agent case works through the multi-agent API.
    MultiPendulum = make_multiagent("Pendulum-v0")
    register_env("multi_pend", lambda _: MultiPendulum(1))

    trials = run_experiments({
        "test": {
            "run": "PPO",
            "env": "multi_pend",
            # Stop on success (mean episode reward reaches -200) or when
            # the hard timestep budget is exhausted, whichever comes first.
            "stop": {
                "timesteps_total": 500000,
                "episode_reward_mean": -200,
            },
            "config": {
                "train_batch_size": 2048,
                "vf_clip_param": 10.0,
                # Single local worker sampling from 10 vectorized envs.
                "num_workers": 0,
                "num_envs_per_worker": 10,
                "lambda": 0.1,
                "gamma": 0.95,
                "lr": 0.0003,
                "sgd_minibatch_size": 64,
                "num_sgd_iter": 10,
                "model": {
                    "fcnet_hiddens": [64, 64],
                },
                "batch_mode": "complete_episodes",
            },
        }
    })

    # Fail loudly if training ran out of timesteps before hitting the
    # -200 reward threshold (i.e. the stop condition that fired was
    # `timesteps_total`, not `episode_reward_mean`).
    if trials[0].last_result["episode_reward_mean"] < -200:
        raise ValueError("Did not get to -200 reward", trials[0].last_result)