ray/rllib/examples/parallel_evaluation_and_training.py

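"""Example of running evaluation in parallel with training in RLlib.

Configures two dedicated evaluation workers and switches on
`evaluation_parallel_to_training`, so that each evaluation round runs
alongside the next training step instead of blocking it.
"""

# Typical invocations (the flag values here are only examples):
#   python parallel_evaluation_and_training.py --framework torch
#   python parallel_evaluation_and_training.py --as-test --stop-reward 150.0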

import argparse
import os

from ray.rllib.utils.test_utils import check_learning_achieved

parser = argparse.ArgumentParser()
parser.add_argument("--run", type=str, default="PPO")
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
    "--framework", choices=["tf2", "tf", "tfe", "torch"], default="tf")
parser.add_argument("--as-test", action="store_true")
parser.add_argument("--stop-iters", type=int, default=200)
parser.add_argument("--stop-timesteps", type=int, default=100000)
parser.add_argument("--stop-reward", type=float, default=100.0)
if __name__ == "__main__":
    import ray
    from ray import tune

    args = parser.parse_args()
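
    # `--num-cpus 0` (the default) means "auto-detect": `args.num_cpus or None`
    # passes None to ray.init(), which then uses all CPUs on this machine.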
    ray.init(num_cpus=args.num_cpus or None)

    config = {
        "env": "CartPole-v0",
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
        "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
        "model": {
            "vf_share_layers": True,
        },
        "framework": args.framework,
        # Run with tracing enabled for tfe/tf2.
        "eager_tracing": args.framework in ["tfe", "tf2"],

        # Parallel evaluation+training config.
        # Use two evaluation workers.
        "evaluation_num_workers": 2,
        # Evaluate every other training iteration (together
        # with every other call to Trainer.train()).
        "evaluation_interval": 2,
        # Run 10 episodes per evaluation round (5 per eval worker and
        # round). The longer evaluation takes, the more sense it makes
        # to use `evaluation_parallel_to_training=True`.
        "evaluation_num_episodes": 10,
        # Switch on evaluation in parallel with training.
        "evaluation_parallel_to_training": True,
    }
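
    # Note: with `evaluation_parallel_to_training=True`, RLlib runs each
    # evaluation round in a separate thread, concurrently with the training
    # step inside the same `train()` call, rather than sequentially after it.

    # Tune stops the trial as soon as ANY single one of these criteria is met.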
    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }

    results = tune.run(args.run, config=config, stop=stop, verbose=2)

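    # With `--as-test`, verify the reward target was actually reached
    # (check_learning_achieved raises an error otherwise).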
    if args.as_test:
        check_learning_achieved(results, args.stop_reward)

    ray.shutdown()