pong-deterministic-rainbow:
    env: PongDeterministic-v4
    run: DQN
    stop:
        episode_reward_mean: 20
    config:
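        # Rainbow components enabled on top of DQN: a 51-atom distributional
        # head (num_atoms), noisy exploration layers (noisy), multi-step
        # returns (n_step), and prioritized replay (replay_buffer_config).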
        num_atoms: 51
        noisy: True
        gamma: 0.99
        lr: .0001
        hiddens: [512]
        rollout_fragment_length: 4
        train_batch_size: 32
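        # Epsilon-greedy exploration is effectively disabled (epsilon decays to
        # 0.0 within 2 timesteps); exploration comes from the noisy layers.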
        exploration_config:
            epsilon_timesteps: 2
            final_epsilon: 0.0
        target_network_update_freq: 500
        replay_buffer_config:
            type: MultiAgentPrioritizedReplayBuffer
            prioritized_replay_alpha: 0.5
            learning_starts: 10000
            capacity: 50000
        n_step: 3
        gpu: True
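        # Preprocessor settings: downscale frames to 42x42 and convert to
        # grayscale.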
        model:
            grayscale: True
            zero_mean: False
            dim: 42
        # We set compress_observations to True because few machines would be
        # able to hold the replay buffer in memory otherwise.
        compress_observations: True
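
# To launch this tuned example, one option (a sketch, assuming a matching
# Ray/RLlib install; the file name is illustrative and the exact CLI form
# depends on the Ray version) is:
#
#   rllib train -f pong-rainbow.yaml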