#!/usr/bin/env python
# Runs one or more regression tests. Retries each test up to 3 times.
#
# Example usage:
# $ python run_regression_tests.py regression-tests/cartpole-es.yaml
#
# When using in BAZEL (with py_test), e.g. see in ray/rllib/BUILD:
# py_test(
#     name = "run_regression_tests",
#     main = "tests/run_regression_tests.py",
#     tags = ["learning_tests"],
#     size = "enormous",  # = 60min timeout
#     srcs = ["tests/run_regression_tests.py"],
#     data = glob(["tuned_examples/regression_tests/*.yaml"]),
#     # Pass the `BAZEL` option and the path in which to look for yaml regression files.
#     args = ["BAZEL", "tuned_examples/regression_tests"]
# )
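# For reference, each yaml file consumed by this script is a standard Tune
# "experiments" dict (experiment name -> spec), as passed to
# `ray.tune.run_experiments()`. A hypothetical minimal example (the name, env,
# and threshold values are illustrative, not taken from an actual repo file):
#
#   cartpole-ppo-regression:
#       env: CartPole-v0
#       run: PPO
#       stop:
#           episode_reward_mean: 150
#           timesteps_total: 100000
#
# The `stop: episode_reward_mean` threshold is what the loop below compares
# each trial's final `episode_reward_mean` against to decide pass/fail.
#
# With a BUILD entry like the one sketched above, the corresponding Bazel
# invocation would presumably be `bazel test //rllib:run_regression_tests`
# (the exact target label is an assumption based on the py_test example).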
from pathlib import Path
import sys
import yaml

import ray
from ray.tune import run_experiments
if __name__ == "__main__":
    # Bazel regression test mode: Get path to look for yaml files from argv[2].
    if sys.argv[1] == "BAZEL":
        ray.init(num_cpus=5)
        # Get the path to use.
        rllib_dir = Path(__file__).parent.parent
        print("rllib dir={}".format(rllib_dir))
        yaml_files = rllib_dir.rglob(sys.argv[2] + "/*.yaml")
        yaml_files = sorted(
            map(lambda path: str(path.absolute()), yaml_files), reverse=True)
    # Normal mode: Get yaml files to run from command line.
    else:
        ray.init()
        yaml_files = sys.argv[1:]

    print("Will run the following regression files:")
    for yaml_file in yaml_files:
        print("->", yaml_file)
    # Loop through all collected files.
    for yaml_file in yaml_files:
        # Use safe_load and a context manager to avoid constructing arbitrary
        # objects from yaml and to close the file handle properly.
        with open(yaml_file) as f:
            experiments = yaml.safe_load(f)

        print("== Test config ==")
        print(yaml.dump(experiments))

        passed = False
        for i in range(3):
            trials = run_experiments(experiments, resume=False, verbose=0)

            # A run passes if any trial reached its `episode_reward_mean`
            # stopping criterion (i.e. it stopped because learning succeeded,
            # not because it hit another stopping condition).
            for t in trials:
                if (t.last_result["episode_reward_mean"] >=
                        t.stopping_criterion["episode_reward_mean"]):
                    passed = True
                    break

            if passed:
                print("Regression test PASSED")
                break
            else:
                print("Regression test FAILED on attempt {}.".format(i + 1))

        if not passed:
            print("Overall regression FAILED: Exiting with Error.")
            sys.exit(1)