mirror of
https://github.com/vale981/ray
synced 2025-03-06 18:41:40 -05:00

The DDPG/TD3 algorithms currently do not have a PyTorch implementation. This PR adds PyTorch support for DDPG/TD3 to RLlib. This PR:
- Depends on the re-factor PR for DDPG (Functional Algorithm API).
- Adds learning regression tests for the PyTorch version of DDPG and a DDPG (torch).
- Updates the documentation to reflect that DDPG and TD3 now support PyTorch.

* Learning Pendulum-v0 on the torch version (same config as tf); wall time is a little slower (~20%) than tf.
* Fixes a GPU target-model problem.
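As a quick illustration of what this enables, here is a minimal usage sketch; the `use_pytorch` config flag is an assumption based on RLlib's convention in this era (later releases switched to a `framework` setting), not something specified in this PR:

import ray
from ray.rllib.agents.ddpg import DDPGTrainer

ray.init()
# "use_pytorch" is the assumed era-appropriate switch to the torch version.
trainer = DDPGTrainer(env="Pendulum-v0", config={"use_pytorch": True})
for _ in range(5):
    # Each call runs one training iteration and returns a result dict.
    print(trainer.train()["episode_reward_mean"])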
63 lines
2.2 KiB
Python
"""A more stable successor to TD3.
|
|
|
|
By default, this uses a near-identical configuration to that reported in the
|
|
TD3 paper.
|
|
"""

from ray.rllib.agents.ddpg.ddpg import DDPGTrainer, \
    DEFAULT_CONFIG as DDPG_CONFIG

TD3_DEFAULT_CONFIG = DDPGTrainer.merge_trainer_configs(
    DDPG_CONFIG,
    {
        # largest changes: twin Q functions, delayed policy updates, and target
        # smoothing
        "twin_q": True,
        "policy_delay": 2,
        "smooth_target_policy": True,
        "target_noise": 0.2,
        "target_noise_clip": 0.5,

        "exploration_config": {
            # TD3 uses simple Gaussian noise on top of deterministic NN-output
            # actions (after a possible pure random phase of n timesteps).
            "type": "GaussianNoise",
            # For how many timesteps should we return completely random
            # actions, before we start adding (scaled) noise?
            "random_timesteps": 10000,
            # Gaussian stddev of action noise for exploration.
            "stddev": 0.1,
            # Scaling settings by which the Gaussian noise is scaled before
            # being added to the actions. NOTE: The scale timesteps start only
            # after(!) any random steps have been finished.
            # By default, do not anneal over time (fixed 1.0).
            "initial_scale": 1.0,
            "final_scale": 1.0,
            "scale_timesteps": 1
        },
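        # Concretely, with the settings above: env steps 0-9999 return purely
        # random actions; from step 10000 on, the deterministic action gets
        # additive Gaussian noise with stddev 0.1 at a constant scale of 1.0
        # (no annealing, since initial_scale == final_scale).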

        # other changes & things we want to keep fixed:
        # larger actor learning rate, no l2 regularisation, no Huber loss, etc.
        "learning_starts": 10000,
        "actor_hiddens": [400, 300],
        "critic_hiddens": [400, 300],
        "n_step": 1,
        "gamma": 0.99,
        "actor_lr": 1e-3,
        "critic_lr": 1e-3,
        "l2_reg": 0.0,
        "tau": 5e-3,
        "train_batch_size": 100,
        "use_huber": False,
        "target_network_update_freq": 0,
        "num_workers": 0,
        "num_gpus_per_worker": 0,
        "worker_side_prioritization": False,
        "buffer_size": 1000000,
        "prioritized_replay": False,
        "clip_rewards": False,
        "use_state_preprocessor": False,
    })

TD3Trainer = DDPGTrainer.with_updates(
    name="TD3",
    default_config=TD3_DEFAULT_CONFIG,
)
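For reference, a minimal sketch of how the trainer defined above would be used (standard RLlib Trainer API; the environment choice mirrors the Pendulum-v0 learning test mentioned in the PR description):

import ray
from ray.rllib.agents.ddpg.td3 import TD3Trainer

ray.init()
# Build a TD3 trainer with the defaults above (TD3_DEFAULT_CONFIG).
trainer = TD3Trainer(env="Pendulum-v0")
# One training iteration; the result dict carries learning stats.
result = trainer.train()
print(result["episode_reward_mean"])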