# You can expect ~20 reward within 1.1m timesteps / 2.1 hours on a K80 GPU
pong-deterministic-dqn:
    env: PongDeterministic-v4
    run: DQN
    stop:
        episode_reward_mean: 20
        time_total_s: 7200
    config:
        # Works for both torch and tf.
        framework: tf
        num_gpus: 1
        gamma: 0.99
        lr: .0001
        replay_buffer_config:
            type: MultiAgentPrioritizedReplayBuffer
            capacity: 50000
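        # Number of env steps to sample before the first learning update
        # (replay-buffer warm-up).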
        num_steps_sampled_before_learning_starts: 10000
        rollout_fragment_length: 4
        train_batch_size: 32
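        # Epsilon-greedy exploration (RLlib's default for DQN): epsilon is
        # annealed from its initial value (1.0 by default) down to
        # final_epsilon over epsilon_timesteps env steps.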
        exploration_config:
            epsilon_timesteps: 200000
            final_epsilon: .01
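        # Observation preprocessing: frames are converted to grayscale and
        # downscaled to 42x42 (dim: 42); zero_mean: False keeps pixel values
        # scaled to [0, 1] rather than [-1, 1].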
        model:
            grayscale: True
            zero_mean: False
            dim: 42
        # Set compress_observations to True, since few machines could
        # otherwise hold the replay buffer in memory.
        compress_observations: True
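
# Note: this tuned example is typically launched through the RLlib CLI,
# e.g. `rllib train -f <path-to-this-yaml>` (the exact flag/subcommand may
# differ across Ray versions), or by passing the parsed dict to
# tune.run_experiments().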