"""Example of using an LSTM model on a stateless version of CartPole.

StatelessCartPole hides the velocity components of CartPole's observation,
making the task partially observable; the LSTM provides the memory needed
to recover that hidden state.
"""

import argparse
import os

from ray.rllib.examples.env.stateless_cartpole import StatelessCartPole
from ray.rllib.utils.test_utils import check_learning_achieved

parser = argparse.ArgumentParser()
parser.add_argument(
    "--run", type=str, default="PPO", help="The RLlib-registered algorithm to use."
)
parser.add_argument(
    "--num-cpus", type=int, default=0, help="Number of CPUs for ray.init() (0 = auto)."
)
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "tfe", "torch"],
    default="tf",
    help="The DL framework specifier.",
)
parser.add_argument(
    "--eager-tracing",
    action="store_true",
    help="Run with tracing enabled for the tf2/tfe eager frameworks.",
)
parser.add_argument(
    "--use-prev-action",
    action="store_true",
    help="Feed the previous action into the LSTM.",
)
parser.add_argument(
    "--use-prev-reward",
    action="store_true",
    help="Feed the previous reward into the LSTM.",
)
parser.add_argument(
    "--as-test",
    action="store_true",
    help="Whether this script should be run as a test: --stop-reward must "
    "be achieved within --stop-timesteps AND --stop-iters.",
)
parser.add_argument(
    "--stop-iters", type=int, default=200, help="Number of iterations to train."
)
parser.add_argument(
    "--stop-timesteps", type=int, default=100000, help="Number of timesteps to train."
)
parser.add_argument(
    "--stop-reward", type=float, default=150.0, help="Reward at which we stop training."
)
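
# Example invocation (the script filename is assumed here; adjust to wherever
# this file lives in your checkout):
#   python cartpole_lstm.py --run=PPO --framework=torch --use-prev-action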

if __name__ == "__main__":
    import ray
    from ray import air, tune

    args = parser.parse_args()

    ray.init(num_cpus=args.num_cpus or None)

    configs = {
        "PPO": {
            "num_sgd_iter": 5,
            "model": {
                "vf_share_layers": True,
            },
            "vf_loss_coeff": 0.0001,
        },
        "IMPALA": {
            "num_workers": 2,
            "num_gpus": 0,
            "vf_loss_coeff": 0.01,
        },
    }
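    # `configs` maps the algorithm chosen via --run to its hyperparameter
    # overrides; the selected entry is merged into the common config below.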

    config = dict(
        configs[args.run],
        **{
            "env": StatelessCartPole,
            # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
            "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
            "model": {
                "use_lstm": True,
                "lstm_cell_size": 256,
                "lstm_use_prev_action": args.use_prev_action,
                "lstm_use_prev_reward": args.use_prev_reward,
            },
            "framework": args.framework,
            # Run with tracing enabled for tfe/tf2?
            "eager_tracing": args.eager_tracing,
        }
    )
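    # Note: `dict(base, **overrides)` is a shallow merge in which the keyword
    # dict wins on key conflicts, so the "model" sub-dict here fully replaces
    # any per-algorithm "model" entry from `configs` (e.g. PPO's
    # vf_share_layers setting above).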

    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }
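    # Tune stops a trial as soon as ANY of the above criteria is met.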

    # To run the Algorithm without ``Tuner.fit``, using our LSTM model and
    # manual state-in handling, do the following:

    # Example (use `config` from the above code):
    # >> import numpy as np
    # >> from ray.rllib.algorithms.ppo import PPO
    # >>
    # >> algo = PPO(config)
    # >> lstm_cell_size = config["model"]["lstm_cell_size"]
    # >> env = StatelessCartPole()
    # >> obs = env.reset()
    # >>
    # >> # range(2) b/c h- and c-states of the LSTM.
    # >> init_state = state = [
    # ..     np.zeros([lstm_cell_size], np.float32) for _ in range(2)
    # .. ]
    # >> prev_a = 0
    # >> prev_r = 0.0
    # >>
    # >> while True:
    # >>     a, state_out, _ = algo.compute_single_action(
    # ..         obs, state, prev_a, prev_r)
    # >>     obs, reward, done, _ = env.step(a)
    # >>     if done:
    # >>         obs = env.reset()
    # >>         state = init_state
    # >>         prev_a = 0
    # >>         prev_r = 0.0
    # >>     else:
    # >>         state = state_out
    # >>         prev_a = a
    # >>         prev_r = reward
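
    # (The sketch above assumes the classic Gym API, where `reset()` returns
    # only the observation and `step()` returns a single `done` flag.)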

    tuner = tune.Tuner(
        args.run, param_space=config, run_config=air.RunConfig(stop=stop, verbose=2)
    )
    results = tuner.fit()
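
    # One way to inspect the outcome (ResultGrid API; assumes Ray >= 2.0):
    # >> best = results.get_best_result(metric="episode_reward_mean", mode="max")
    # >> print(best.metrics["episode_reward_mean"])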

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)
    ray.shutdown()