
* Fix QMix, SAC, and MADDPG too.
* Unpin gym and deprecate Pendulum-v0. Many tests in RLlib depended on Pendulum-v0, but gym 0.21 deprecates Pendulum-v0 in favor of Pendulum-v1. This may change reward thresholds, so all of the Pendulum-v1 benchmarks may have to be rerun, or another environment used instead. The same applies to FrozenLake-v0 and FrozenLake-v1. Lastly, all of the RLlib tests have been moved to Python 3.7.
* Add gym installation based on Python version: pin Python <= 3.6 to gym 0.19 due to install issues with Atari ROMs in gym 0.20.
* Reformatting.
* Fix tests.
* Move the atari-py install conditional to requirements.txt.
* Migrate to the new ALE install method.
* Make parametric_actions_cartpole return float32 actions/obs.
* Add type conversions if obs/actions don't match the space.
* Add utils to make elements match gym space dtypes (sketched below).

Co-authored-by: Jun Gong <jungong@anyscale.com>
Co-authored-by: sven1977 <svenmika1977@gmail.com>
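A dtype-matching util like the one mentioned in the last bullet could look roughly like the following minimal sketch. The helper name match_space_dtype, its signature, and the recursion over Dict/Tuple spaces are assumptions for illustration, not the actual util added to Ray:

import gym
import numpy as np


def match_space_dtype(element, space):
    # Hypothetical helper: recursively cast `element` so that its
    # dtype(s) agree with the dtype(s) of the given gym space.
    if isinstance(space, gym.spaces.Dict):
        return {
            key: match_space_dtype(element[key], subspace)
            for key, subspace in space.spaces.items()
        }
    if isinstance(space, gym.spaces.Tuple):
        return tuple(
            match_space_dtype(sub_elem, subspace)
            for sub_elem, subspace in zip(element, space.spaces)
        )
    arr = np.asarray(element)
    if arr.dtype != space.dtype:
        # E.g. cast float64 observations down to a float32 Box space.
        arr = arr.astype(space.dtype)
    return arr

For example, match_space_dtype(np.array([0.1, 0.2]), gym.spaces.Box(-1, 1, (2,), np.float32)) would return a float32 array.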
80 lines
2.2 KiB
Python
"""Example of using a custom model with batch norm."""
|
|
|
|
import argparse
|
|
import os
|
|
|
|
import ray
|
|
from ray import tune
|
|
from ray.rllib.examples.models.batch_norm_model import BatchNormModel, \
|
|
KerasBatchNormModel, TorchBatchNormModel
|
|
from ray.rllib.models import ModelCatalog
|
|
from ray.rllib.utils.framework import try_import_tf
|
|
from ray.rllib.utils.test_utils import check_learning_achieved
|
|
|
|
tf1, tf, tfv = try_import_tf()
|
|
|
|
parser = argparse.ArgumentParser()
|
|
parser.add_argument(
|
|
"--run",
|
|
type=str,
|
|
default="PPO",
|
|
help="The RLlib-registered algorithm to use.")
|
|
parser.add_argument(
|
|
"--framework",
|
|
choices=["tf", "tf2", "tfe", "torch"],
|
|
default="tf",
|
|
help="The DL framework specifier.")
|
|
parser.add_argument(
|
|
"--as-test",
|
|
action="store_true",
|
|
help="Whether this script should be run as a test: --stop-reward must "
|
|
"be achieved within --stop-timesteps AND --stop-iters.")
|
|
parser.add_argument(
|
|
"--stop-iters",
|
|
type=int,
|
|
default=200,
|
|
help="Number of iterations to train.")
|
|
parser.add_argument(
|
|
"--stop-timesteps",
|
|
type=int,
|
|
default=100000,
|
|
help="Number of timesteps to train.")
|
|
parser.add_argument(
|
|
"--stop-reward",
|
|
type=float,
|
|
default=150.0,
|
|
help="Reward at which we stop training.")
|
|
|
|
if __name__ == "__main__":
|
|
args = parser.parse_args()
|
|
ray.init()
|
|
|
|
ModelCatalog.register_custom_model(
|
|
"bn_model", TorchBatchNormModel
|
|
if args.framework == "torch" else KerasBatchNormModel
|
|
if args.run != "PPO" else BatchNormModel)
|
|
|
|
config = {
|
|
"env": "Pendulum-v1" if args.run in ["DDPG", "SAC"] else "CartPole-v0",
|
|
"model": {
|
|
"custom_model": "bn_model",
|
|
},
|
|
"lr": 0.0003,
|
|
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
|
|
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
|
|
"num_workers": 0,
|
|
"framework": args.framework,
|
|
}
|
|
|
|
stop = {
|
|
"training_iteration": args.stop_iters,
|
|
"timesteps_total": args.stop_timesteps,
|
|
"episode_reward_mean": args.stop_reward,
|
|
}
|
|
|
|
results = tune.run(args.run, stop=stop, config=config, verbose=2)
|
|
|
|
if args.as_test:
|
|
check_learning_achieved(results, args.stop_reward)
|
|
|
|
ray.shutdown()
|
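Assuming the script is saved as batch_norm_model.py, a typical invocation (flags taken from the argparse definitions above) would be:

python batch_norm_model.py --run=PPO --framework=torch --stop-iters=200 --as-test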