# Our implementation of SAC can reach 9k reward in 400k timesteps
halfcheetah_sac:
    env: HalfCheetah-v3
    run: SAC
    stop:
        episode_reward_mean: 9000
    config:
        # Works for both torch and tf.
        framework: tf
        horizon: 1000
        soft_horizon: false
        Q_model:
            fcnet_activation: relu
            fcnet_hiddens: [256, 256]
        policy_model:
            fcnet_activation: relu
            fcnet_hiddens: [256, 256]
        tau: 0.005
        target_entropy: auto
        no_done_at_end: true
        n_step: 1
        rollout_fragment_length: 1
        train_batch_size: 256
        target_network_update_freq: 1
        min_sample_timesteps_per_reporting: 1000
        replay_buffer_config:
            type: MultiAgentPrioritizedReplayBuffer
            learning_starts: 10000
        optimization:
            actor_learning_rate: 0.0003
            critic_learning_rate: 0.0003
            entropy_learning_rate: 0.0003
        num_workers: 0
        num_gpus: 0
        clip_actions: false
        normalize_actions: true
        evaluation_interval: 1
        metrics_smoothing_episodes: 5
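
# A minimal launch sketch, assuming this config is saved as halfcheetah-sac.yaml
# (the file name is an assumption) and a Ray/RLlib installation that provides
# the `rllib` CLI:
#   rllib train -f halfcheetah-sac.yaml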