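"""Deep Deterministic Policy Gradient (DDPG) Trainer for RLlib (TF-only).

This module defines DEFAULT_CONFIG (including the optional TD3 tricks such
as twin Q-networks, delayed policy updates, and target policy smoothing),
a validate_config() helper that maps deprecated exploration settings onto
the newer `exploration_config` dict, and DDPGTrainer itself, built via
GenericOffPolicyTrainer.with_updates().
"""
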
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer
from ray.rllib.agents.ddpg.ddpg_policy import DDPGTFPolicy
from ray.rllib.utils.deprecation import deprecation_warning, \
    DEPRECATED_VALUE
from ray.rllib.utils.exploration.per_worker_ornstein_uhlenbeck_noise import \
    PerWorkerOrnsteinUhlenbeckNoise

# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # === Twin Delayed DDPG (TD3) and Soft Actor-Critic (SAC) tricks ===
    # TD3: https://spinningup.openai.com/en/latest/algorithms/td3.html
    # In addition to settings below, you can use "exploration_noise_type" and
    # "exploration_gauss_act_noise" to get IID Gaussian exploration noise
    # instead of OU exploration noise.
    # twin Q-net
    "twin_q": False,
    # delayed policy update
    "policy_delay": 1,
    # target policy smoothing
    # (this also replaces OU exploration noise with IID Gaussian exploration
    # noise, for now)
    "smooth_target_policy": False,
    # gaussian stddev of target action noise for smoothing
    "target_noise": 0.2,
    # target noise limit (bound)
    "target_noise_clip": 0.5,

    # === Evaluation ===
    # Evaluate with epsilon=0 every `evaluation_interval` training iterations.
    # The evaluation stats will be reported under the "evaluation" metric key.
    # Note that evaluation is currently not parallelized, and that for Ape-X
    # metrics are already only reported for the lowest epsilon workers.
    "evaluation_interval": None,
    # Number of episodes to run per evaluation period.
    "evaluation_num_episodes": 10,

    # === Model ===
    # Apply a state preprocessor with spec given by the "model" config option
    # (like other RL algorithms). This is mostly useful if you have a weird
    # observation shape, like an image. Disabled by default.
    "use_state_preprocessor": False,
    # Postprocess the policy network model output with these hidden layers. If
    # use_state_preprocessor is False, then these will be the *only* hidden
    # layers in the network.
    "actor_hiddens": [400, 300],
    # Hidden layers activation of the postprocessing stage of the policy
    # network
    "actor_hidden_activation": "relu",
    # Postprocess the critic network model output with these hidden layers;
    # again, if use_state_preprocessor is True, then the state will be
    # preprocessed by the model specified with the "model" config option first.
    "critic_hiddens": [400, 300],
    # Hidden layers activation of the postprocessing stage of the critic.
    "critic_hidden_activation": "relu",
    # N-step Q learning
    "n_step": 1,

    # === Exploration ===
    "exploration_config": {
        # DDPG uses OrnsteinUhlenbeck (stateful) noise to be added to NN-output
        # actions (after a possible pure random phase of n timesteps).
        "type": "OrnsteinUhlenbeckNoise",
        # For how many timesteps should we return completely random actions,
        # before we start adding (scaled) noise?
        "random_timesteps": 1000,
        # The OU-base scaling factor to always apply to action-added noise.
        "ou_base_scale": 0.1,
        # The OU theta param.
        "ou_theta": 0.15,
        # The OU sigma param.
        "ou_sigma": 0.2,
        # The initial noise scaling factor.
        "initial_scale": 1.0,
        # The final noise scaling factor.
        "final_scale": 1.0,
        # Timesteps over which to anneal scale (from initial to final values).
        "scale_timesteps": 10000,
    },
    # Number of env steps to optimize for before returning
    "timesteps_per_iteration": 1000,

    # TODO(sven): Move to Exploration API's (ParameterNoise class).
    # If True, parameter space noise will be used for exploration.
    # See https://blog.openai.com/better-exploration-with-parameter-noise/
    "parameter_noise": False,

    # Extra configuration that disables exploration.
    "evaluation_config": {
        "explore": False
    },
    # === Replay buffer ===
    # Size of the replay buffer. Note that if async_updates is set, then
    # each worker will have a replay buffer of this size.
    "buffer_size": 50000,
    # If True, prioritized replay buffer will be used.
    "prioritized_replay": True,
    # Alpha parameter for prioritized replay buffer.
    "prioritized_replay_alpha": 0.6,
    # Beta parameter for sampling from prioritized replay buffer.
    "prioritized_replay_beta": 0.4,
    # Time steps over which the beta parameter is annealed.
    "prioritized_replay_beta_annealing_timesteps": 20000,
    # Final value of beta
    "final_prioritized_replay_beta": 0.4,
    # Epsilon to add to the TD errors when updating priorities.
    "prioritized_replay_eps": 1e-6,
    # Whether to LZ4 compress observations
    "compress_observations": False,

    # === Optimization ===
    # Learning rate for the critic (Q-function) optimizer.
    "critic_lr": 1e-3,
    # Learning rate for the actor (policy) optimizer.
    "actor_lr": 1e-3,
    # Update the target network every `target_network_update_freq` steps.
    "target_network_update_freq": 0,
    # Update the target by \tau * policy + (1-\tau) * target_policy
    "tau": 0.002,
    # If True, use huber loss instead of squared loss for critic network
    # Conventionally, no need to clip gradients if using a huber loss
    "use_huber": False,
    # Threshold of a huber loss
    "huber_threshold": 1.0,
    # Weights for L2 regularization
    "l2_reg": 1e-6,
    # If not None, clip gradients during optimization at this value
    "grad_norm_clipping": None,
    # How many steps of the model to sample before learning starts.
    "learning_starts": 1500,
    # Update the replay buffer with this many samples at once. Note that this
    # setting applies per-worker if num_workers > 1.
    "rollout_fragment_length": 1,
    # Size of a batch sampled from the replay buffer for training. Note that
    # if async_updates is set, then each worker returns gradients for a
    # batch of this size.
    "train_batch_size": 256,

    # === Parallelism ===
    # Number of workers for collecting samples with. This only makes sense
    # to increase if your environment is particularly slow to sample, or if
    # you're using the Async or Ape-X optimizers.
    "num_workers": 0,
    # Whether to compute priorities on workers.
    "worker_side_prioritization": False,
    # Prevent iterations from going lower than this time span
    "min_iter_time_s": 1,
})
# __sphinx_doc_end__
# yapf: enable
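
# A TD3-flavored setup can be sketched by overriding the "tricks" above,
# e.g. {"twin_q": True, "policy_delay": 2, "smooth_target_policy": True}
# (an illustrative combination only, not a tuned preset).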


def validate_config(config):
    # PyTorch check.
    if config["use_pytorch"]:
        raise ValueError("DDPG does not support PyTorch yet! Use tf instead.")

    # TODO(sven): Remove at some point.
    # Backward compatibility of noise-based exploration config.
    schedule_max_timesteps = None
    if config.get("schedule_max_timesteps", DEPRECATED_VALUE) != \
            DEPRECATED_VALUE:
        deprecation_warning("schedule_max_timesteps",
                            "exploration_config.scale_timesteps")
        schedule_max_timesteps = config["schedule_max_timesteps"]
    if config.get("exploration_final_scale", DEPRECATED_VALUE) != \
            DEPRECATED_VALUE:
        deprecation_warning("exploration_final_scale",
                            "exploration_config.final_scale")
        if isinstance(config["exploration_config"], dict):
            config["exploration_config"]["final_scale"] = \
                config.pop("exploration_final_scale")
    if config.get("exploration_fraction", DEPRECATED_VALUE) != \
            DEPRECATED_VALUE:
        assert schedule_max_timesteps is not None
        deprecation_warning("exploration_fraction",
                            "exploration_config.scale_timesteps")
        if isinstance(config["exploration_config"], dict):
            config["exploration_config"]["scale_timesteps"] = config.pop(
                "exploration_fraction") * schedule_max_timesteps
    if config.get("per_worker_exploration", DEPRECATED_VALUE) != \
            DEPRECATED_VALUE:
        deprecation_warning(
            "per_worker_exploration",
            "exploration_config.type=PerWorkerOrnsteinUhlenbeckNoise")
        if isinstance(config["exploration_config"], dict):
            config["exploration_config"]["type"] = \
                PerWorkerOrnsteinUhlenbeckNoise
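
# As an illustration (hypothetical legacy config): passing
# {"exploration_final_scale": 0.02} through validate_config() emits a
# deprecation warning and moves the value to
# config["exploration_config"]["final_scale"].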


DDPGTrainer = GenericOffPolicyTrainer.with_updates(
    name="DDPG",
    default_config=DEFAULT_CONFIG,
    default_policy=DDPGTFPolicy,
    get_policy_class=None,
    validate_config=validate_config,
)
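

# A minimal usage sketch (assuming `ray` and `gym` are installed and the
# continuous-control env "Pendulum-v0" is available); run this module
# directly to train for a few iterations with the defaults above.
if __name__ == "__main__":
    import ray
    from ray.tune.logger import pretty_print

    ray.init()
    trainer = DDPGTrainer(env="Pendulum-v0")
    for _ in range(3):
        # Each call runs `timesteps_per_iteration` env steps plus replay
        # updates and returns a result dict with episode stats.
        result = trainer.train()
        print(pretty_print(result))
    ray.shutdown()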