# ray/rllib/tuned_examples/ppo/repeatafterme-ppo-lstm.yaml
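#
# Tuned example: PPO with an LSTM solving the RepeatAfterMeEnv, in which the
# agent is rewarded for repeating the observation it saw `repeat_delay`
# timesteps earlier. A memoryless (pure feed-forward) policy cannot solve
# this, hence the LSTM. Run e.g. via the RLlib CLI:
#   rllib train -f repeatafterme-ppo-lstm.yaml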
repeat-after-me-ppo-w-lstm:
    env: ray.rllib.examples.env.repeat_after_me_env.RepeatAfterMeEnv
    run: PPO
    stop:
        episode_reward_mean: 50
        timesteps_total: 100000
    config:
        # Works for both torch and tf.
        framework: tf
        env_config:
            config:
                repeat_delay: 2
        gamma: 0.9
        lr: 0.0003
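        # Sample on the local worker only (no remote rollout workers), using
        # 20 vectorized env copies.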
        num_workers: 0
        num_envs_per_worker: 20
        num_sgd_iter: 5
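        # Share the value function's layers with the policy network and add a
        # small entropy bonus for exploration.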
        vf_share_layers: true
        entropy_coeff: 0.00001
        model:
            use_lstm: true
            lstm_cell_size: 64
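            # Max sequence length for truncated backprop-through-time.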
            max_seq_len: 20
            fcnet_hiddens: [64]