sac-halfcheetahbulletenv-v0:
    env: HalfCheetahBulletEnv-v0
    run: SAC
    # Minimum reward and total ts (in given time_total_s) to pass this test.
    pass_criteria:
        episode_reward_mean: 400.0
        timesteps_total: 200000
    stop:
        time_total_s: 7200
    config:
        horizon: 1000
        soft_horizon: false
        Q_model:
            fcnet_activation: relu
            fcnet_hiddens: [256, 256]
        policy_model:
            fcnet_activation: relu
            fcnet_hiddens: [256, 256]
        tau: 0.005
        target_entropy: auto
        no_done_at_end: false
        n_step: 3
        rollout_fragment_length: 1
        prioritized_replay: true
        train_batch_size: 256
        target_network_update_freq: 1
        min_sample_timesteps_per_reporting: 1000
        learning_starts: 10000
        optimization:
            actor_learning_rate: 0.0003
            critic_learning_rate: 0.0003
            entropy_learning_rate: 0.0003
        num_workers: 0
        num_gpus: 1
        metrics_smoothing_episodes: 5
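
# Usage sketch (an assumption, not part of the original test file): tuned-example
# YAMLs like this one can typically be launched with the RLlib CLI, e.g.
#   rllib train -f <path/to/this-file>.yaml
# (the `-f` form is the older CLI; newer Ray releases use `rllib train file <path>`).
# Note that `stop` is applied by Tune at runtime, while `pass_criteria` is evaluated
# by the learning-test harness to decide whether the run counts as passing.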