# Run e.g. on a g3.16xlarge (4 GPUs) with `num_gpus=1` (1 GPU for each of the
# 4 trials: MsPacman torch + tf; Pong torch + tf).
# Uses the hyperparameters published in [2] (see rllib/agents/sac/README.md).
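# A typical way to launch this tuned example (a sketch; assumes Ray/RLlib and
# its `rllib` CLI are installed and that this file is saved locally, e.g. as
# `atari-sac.yaml`):
#   rllib train -f atari-sac.yaml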
atari-sac-tf-and-torch:
    env:
        grid_search:
            - MsPacmanNoFrameskip-v4
            - PongNoFrameskip-v4
    run: SAC
    stop:
        timesteps_total: 20000000
    config:
        # Works for both torch and tf.
        framework:
            grid_search: [tf, torch]
        gamma: 0.99
        q_model_config:
            hidden_activation: relu
            hidden_layer_sizes: [512]
        policy_model_config:
            hidden_activation: relu
            hidden_layer_sizes: [512]
        # Do hard syncs.
        # Soft syncs seem to work less reliably for discrete action spaces.
        tau: 1.0
        target_network_update_freq: 8000
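        # For reference, the usual soft-update (Polyak averaging) rule these
        # two settings control (generic pseudocode, not RLlib variable names):
        #   target_weights = tau * online_weights + (1 - tau) * target_weights
        # With tau=1.0 this becomes a full (hard) copy of the online weights,
        # performed once every `target_network_update_freq` (8000) steps.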
        # auto = 0.98 * -log(1/|A|)
        target_entropy: auto
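        # Worked example for the `auto` heuristic above (illustration only;
        # assumes the natural log and Pong's 6-action discrete space):
        #   0.98 * -log(1/6) = 0.98 * log(6) ~= 0.98 * 1.79 ~= 1.76 nats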
        clip_rewards: 1.0
        no_done_at_end: False
        n_step: 1
        rollout_fragment_length: 1
        replay_buffer_config:
            type: MultiAgentPrioritizedReplayBuffer
            capacity: 1000000
            # How many steps to sample before learning starts. The paper uses
            # 20k random timesteps, which is not exactly the same, but seems
            # to work nevertheless. We use 100k here for the longer Atari runs
            # (DQN style: filling up the buffer a bit before learning).
            learning_starts: 100000
            # Prioritized-replay settings (prioritization is enabled by the
            # buffer type above).
            prioritized_replay_alpha: 0.6
            prioritized_replay_beta: 0.4
            prioritized_replay_eps: 1e-6
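            # For reference, the standard prioritized-experience-replay math
            # these three parameters plug into (generic formulas, not
            # RLlib-specific code; p_i is a transition's priority, N the
            # number of stored transitions):
            #   p_i  = |td_error_i| + prioritized_replay_eps
            #   P(i) = p_i^alpha / sum_k(p_k^alpha)    # sampling probability
            #   w_i  = (N * P(i))^(-beta)              # importance weight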
        train_batch_size: 64
        min_sample_timesteps_per_reporting: 4
        optimization:
            actor_learning_rate: 0.0003
            critic_learning_rate: 0.0003
            entropy_learning_rate: 0.0003
        num_workers: 0
        num_gpus: 1
        metrics_smoothing_episodes: 5
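
# Roughly equivalent programmatic launch (a sketch only, not part of the
# config above; assumes Ray Tune is installed and that this file is saved
# locally, e.g. as `atari-sac.yaml`; the top-level `env` key is RLlib-CLI
# sugar and is therefore moved into `config` by hand):
#
#   import yaml
#   from ray import tune
#
#   with open("atari-sac.yaml") as f:
#       experiments = yaml.safe_load(f)
#   for spec in experiments.values():
#       spec["config"]["env"] = spec.pop("env")
#   tune.run_experiments(experiments)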