# ray/rllib/tuned_examples/pendulum-td3.yaml
# This configuration can expect to reach -160 reward in 10k-20k timesteps
pendulum-ddpg:
  env: Pendulum-v0
  # TD3 (Twin Delayed DDPG) — shares the DDPG config surface, hence the key name.
  run: TD3
  # Stop when either condition is met, whichever comes first.
  stop:
    episode_reward_mean: -130
    time_total_s: 900  # 15 minutes
  config:
    # === Model ===
    # Hidden-layer sizes for the actor and critic networks.
    actor_hiddens: [64, 64]
    critic_hiddens: [64, 64]
    # === Exploration ===
    # Collect this many timesteps before any learning updates begin.
    learning_starts: 5000
    exploration_config:
      # Act uniformly at random for the first 5000 timesteps.
      random_timesteps: 5000
    # === Evaluation ===
    # Evaluate every training iteration, averaging over 5 episodes.
    evaluation_interval: 1
    evaluation_num_episodes: 5