"""
Proximal Policy Optimization (PPO)
==================================

This file defines the distributed Trainer class for proximal policy
optimization.
See `ppo_[tf|torch]_policy.py` for the definition of the policy loss.

Detailed documentation: https://docs.ray.io/en/master/rllib-algorithms.html#ppo
"""
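
# Example usage (a minimal sketch; assumes `ray` is installed and the standard
# Gym environment "CartPole-v0" is available):
#
#     import ray
#     from ray.rllib.agents.ppo import PPOTrainer
#
#     ray.init()
#     trainer = PPOTrainer(env="CartPole-v0", config={"num_workers": 2})
#     for _ in range(10):
#         print(trainer.train()["episode_reward_mean"])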

import logging
from typing import Type

from ray.rllib.agents import with_common_config
from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy
from ray.rllib.agents.trainer import Trainer
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches, \
    StandardizeFields, SelectExperiences
from ray.rllib.execution.train_ops import TrainOneStep, MultiGPUTrainOneStep
from ray.rllib.execution.metric_ops import StandardMetricsReporting
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.annotations import override
from ray.rllib.utils.deprecation import DEPRECATED_VALUE
from ray.rllib.utils.metrics.learner_info import LEARNER_INFO, \
    LEARNER_STATS_KEY
from ray.rllib.utils.typing import TrainerConfigDict
from ray.util.iter import LocalIterator

logger = logging.getLogger(__name__)

# yapf: disable
# __sphinx_doc_begin__

# Adds the following updates to the (base) `Trainer` config in
# rllib/agents/trainer.py (`COMMON_CONFIG` dict).
DEFAULT_CONFIG = with_common_config({
    # Should use a critic as a baseline (otherwise don't use value baseline;
    # required for using GAE).
    "use_critic": True,
    # If true, use the Generalized Advantage Estimator (GAE)
    # with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
    "use_gae": True,
    # The GAE (lambda) parameter.
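    # (For reference, GAE computes the advantage estimate as
    #  A_t = sum_{l>=0} (gamma * lambda)^l * delta_{t+l}, where
    #  delta_t = r_t + gamma * V(s_{t+1}) - V(s_t); `lambda` trades off bias
    #  vs. variance of this estimate.)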
    "lambda": 1.0,
    # Initial coefficient for KL divergence.
    "kl_coeff": 0.2,
    # Size of batches collected from each worker.
    "rollout_fragment_length": 200,
    # Number of timesteps collected for each SGD round. This defines the size
    # of each SGD epoch.
    "train_batch_size": 4000,
    # Total SGD batch size across all devices for SGD. This defines the
    # minibatch size within each epoch.
    "sgd_minibatch_size": 128,
    # Whether to shuffle sequences in the batch when training (recommended).
    "shuffle_sequences": True,
    # Number of SGD iterations in each outer loop (i.e., number of epochs to
    # execute per train batch).
    "num_sgd_iter": 30,
    # Stepsize of SGD.
    "lr": 5e-5,
    # Learning rate schedule.
    "lr_schedule": None,
    # Coefficient of the value function loss. IMPORTANT: you must tune this if
    # you set vf_share_layers=True inside your model's config.
    "vf_loss_coeff": 1.0,
    "model": {
        # Share layers for value function. If you set this to True, it's
        # important to tune vf_loss_coeff.
        "vf_share_layers": False,
    },
    # Coefficient of the entropy regularizer.
    "entropy_coeff": 0.0,
    # Decay schedule for the entropy regularizer.
    "entropy_coeff_schedule": None,
    # PPO clip parameter.
    "clip_param": 0.3,
    # Clip param for the value function. Note that this is sensitive to the
    # scale of the rewards. If your expected V is large, increase this.
    "vf_clip_param": 10.0,
    # If specified, clip the global norm of gradients by this amount.
    "grad_clip": None,
    # Target value for KL divergence.
    "kl_target": 0.01,
    # Whether to rollout "complete_episodes" or "truncate_episodes".
    "batch_mode": "truncate_episodes",
    # Which observation filter to apply to the observation.
    "observation_filter": "NoFilter",

    # Deprecated keys:
    # Share layers for value function. If you set this to True, it's important
    # to tune vf_loss_coeff.
    # Use config.model.vf_share_layers instead.
    "vf_share_layers": DEPRECATED_VALUE,
})

# __sphinx_doc_end__
# yapf: enable
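
# Example of overriding some of these defaults when constructing a PPOTrainer
# (a sketch; the keys used here are defined in DEFAULT_CONFIG above):
#
#     from ray.rllib.agents.ppo import DEFAULT_CONFIG, PPOTrainer
#
#     config = DEFAULT_CONFIG.copy()
#     config.update({
#         "lambda": 0.95,
#         "clip_param": 0.2,
#         "train_batch_size": 8000,
#         "sgd_minibatch_size": 256,
#     })
#     trainer = PPOTrainer(config=config, env="CartPole-v0")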


class UpdateKL:
    """Callback to update the KL based on optimization info.

    This is used inside the execution_plan function. The Policy must define
    an `update_kl` method for this to work. This is achieved for PPO via a
    Policy mixin class (which adds the `update_kl` method), defined in
    ppo_[tf|torch]_policy.py.
    """

    def __init__(self, workers):
        self.workers = workers

    def __call__(self, fetches):
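        # `fetches` is the learner info dict from the most recent train step,
        # roughly of the form:
        #     {<policy_id>: {LEARNER_STATS_KEY: {"kl": ..., ...}, ...}, ...}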
        def update(pi, pi_id):
            assert LEARNER_STATS_KEY not in fetches, \
                ("{} should be nested under policy id key".format(
                    LEARNER_STATS_KEY), fetches)
            if pi_id in fetches:
                kl = fetches[pi_id][LEARNER_STATS_KEY].get("kl")
                assert kl is not None, (fetches, pi_id)
                # Make the actual `Policy.update_kl()` call.
                pi.update_kl(kl)
            else:
                logger.warning("No data for {}, not updating kl".format(pi_id))

        # Update KL on all trainable policies within the local (trainer)
        # Worker.
        self.workers.local_worker().foreach_policy_to_train(update)
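

# The `Policy.update_kl()` call used above is provided by a mixin defined in
# `ppo_[tf|torch]_policy.py`. A rough sketch of that interface (illustrative
# only, not the actual implementation):
#
#     class KLCoeffMixin:
#         def update_kl(self, sampled_kl):
#             # Scale the KL coefficient up or down, depending on how far the
#             # measured KL is from the configured `kl_target`.
#             if sampled_kl > 2.0 * self.kl_target:
#                 self.kl_coeff_val *= 1.5
#             elif sampled_kl < 0.5 * self.kl_target:
#                 self.kl_coeff_val *= 0.5
#             return self.kl_coeff_val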


def warn_about_bad_reward_scales(config, result):
    if result["policy_reward_mean"]:
        return result  # Punt on handling multiagent case.

    # Warn about excessively high VF loss.
    learner_info = result["info"][LEARNER_INFO]
    if DEFAULT_POLICY_ID in learner_info:
        scaled_vf_loss = config["vf_loss_coeff"] * \
            learner_info[DEFAULT_POLICY_ID][LEARNER_STATS_KEY]["vf_loss"]

        policy_loss = learner_info[DEFAULT_POLICY_ID][LEARNER_STATS_KEY][
            "policy_loss"]
        if config.get("model", {}).get("vf_share_layers") and \
                scaled_vf_loss > 100:
            logger.warning(
                "The magnitude of your value function loss is extremely large "
                "({}) compared to the policy loss ({}). This can prevent the "
                "policy from learning. Consider scaling down the VF loss by "
                "reducing vf_loss_coeff, or disabling vf_share_layers.".format(
                    scaled_vf_loss, policy_loss))

    # Warn about bad clipping configs.
    if config["vf_clip_param"] <= 0:
        rew_scale = float("inf")
    else:
        rew_scale = round(
            abs(result["episode_reward_mean"]) / config["vf_clip_param"], 0)
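    # Illustration of the check below: with episode_reward_mean=5000 and the
    # default vf_clip_param=10.0, rew_scale = round(5000 / 10.0) = 500, which
    # exceeds 200 and triggers the warning.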
    if rew_scale > 200:
        logger.warning(
            "The magnitude of your environment rewards is more than "
            "{}x the scale of `vf_clip_param`. ".format(rew_scale) +
            "This means that it will take more than "
            "{} iterations for your value ".format(rew_scale) +
            "function to converge. If this is not intended, consider "
            "increasing `vf_clip_param`.")

    return result


class PPOTrainer(Trainer):
    @classmethod
    @override(Trainer)
    def get_default_config(cls) -> TrainerConfigDict:
        return DEFAULT_CONFIG

    @override(Trainer)
    def validate_config(self, config: TrainerConfigDict) -> None:
        """Validates the Trainer's config dict.

        Args:
            config (TrainerConfigDict): The Trainer's config to check.

        Raises:
            ValueError: In case something is wrong with the config.
        """
        # Call super's validation method.
        super().validate_config(config)

        if isinstance(config["entropy_coeff"], int):
            config["entropy_coeff"] = float(config["entropy_coeff"])

        if config["entropy_coeff"] < 0.0:
            raise ValueError("`entropy_coeff` must be >= 0.0!")

        # SGD minibatch size must be smaller than train_batch_size (b/c
        # we subsample a batch of `sgd_minibatch_size` from the train-batch
        # for each of the `num_sgd_iter` SGD iterations).
        # Note: Only check this if `train_batch_size` > 0 (DDPPO sets this
        # to -1 to auto-calculate the actual batch size later).
        if config["train_batch_size"] > 0 and \
                config["sgd_minibatch_size"] > config["train_batch_size"]:
            raise ValueError("`sgd_minibatch_size` ({}) must be <= "
                             "`train_batch_size` ({}).".format(
                                 config["sgd_minibatch_size"],
                                 config["train_batch_size"]))

        # Check for mismatches between `train_batch_size` and
        # `rollout_fragment_length` and auto-adjust `rollout_fragment_length`
        # if necessary.
        # Note: Only check this if `train_batch_size` > 0 (DDPPO sets this
        # to -1 to auto-calculate the actual batch size later).
        num_workers = config["num_workers"] or 1
        calculated_min_rollout_size = \
            num_workers * config["num_envs_per_worker"] * \
            config["rollout_fragment_length"]
        if config["train_batch_size"] > 0 and \
                config["train_batch_size"] % calculated_min_rollout_size != 0:
            new_rollout_fragment_length = config["train_batch_size"] // (
                num_workers * config["num_envs_per_worker"])
            logger.warning(
                "`train_batch_size` ({}) cannot be achieved with your other "
                "settings (num_workers={} num_envs_per_worker={} "
                "rollout_fragment_length={})! Auto-adjusting "
                "`rollout_fragment_length` to {}.".format(
                    config["train_batch_size"], config["num_workers"],
                    config["num_envs_per_worker"],
                    config["rollout_fragment_length"],
                    new_rollout_fragment_length))
            config["rollout_fragment_length"] = new_rollout_fragment_length
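
        # Worked example (illustrative values): with train_batch_size=4000,
        # num_workers=3, num_envs_per_worker=1, and
        # rollout_fragment_length=200, the minimum rollout size is
        # 3 * 1 * 200 = 600; since 4000 % 600 != 0, `rollout_fragment_length`
        # is auto-adjusted to 4000 // (3 * 1) = 1333.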

        # Episodes may only be truncated (and passed into PPO's
        # `postprocessing_fn`) if generalized advantage estimation is used
        # (the value function estimate at the end of a truncated episode is
        # needed to estimate the remaining return).
        if config["batch_mode"] == "truncate_episodes" and \
                not config["use_gae"]:
            raise ValueError(
                "Episode truncation is not supported without a value "
                "function (to estimate the return at the end of the truncated"
                " trajectory). Consider setting "
                "batch_mode=complete_episodes.")

        # Multi-agent mode and multi-GPU optimizer.
        if config["multiagent"]["policies"] and \
                not config["simple_optimizer"]:
            logger.info(
                "In multi-agent mode, policies will be optimized sequentially"
                " by the multi-GPU optimizer. Consider setting "
                "simple_optimizer=True if this doesn't work for you.")

    @override(Trainer)
    def get_default_policy_class(self,
                                 config: TrainerConfigDict) -> Type[Policy]:
        if config["framework"] == "torch":
            from ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy
            return PPOTorchPolicy
        else:
            return PPOTFPolicy

    @staticmethod
    @override(Trainer)
    def execution_plan(workers: WorkerSet, config: TrainerConfigDict,
                       **kwargs) -> LocalIterator[dict]:
        assert len(kwargs) == 0, (
            "PPO execution_plan does NOT take any additional parameters")

        rollouts = ParallelRollouts(workers, mode="bulk_sync")

        # Collect batches for the trainable policies.
        rollouts = rollouts.for_each(
            SelectExperiences(local_worker=workers.local_worker()))
        # Concatenate the SampleBatches into one.
        rollouts = rollouts.combine(
            ConcatBatches(
                min_batch_size=config["train_batch_size"],
                count_steps_by=config["multiagent"]["count_steps_by"],
            ))
        # Standardize advantages.
        rollouts = rollouts.for_each(StandardizeFields(["advantages"]))

        # Perform one training step on the combined + standardized batch.
        if config["simple_optimizer"]:
            train_op = rollouts.for_each(
                TrainOneStep(
                    workers,
                    num_sgd_iter=config["num_sgd_iter"],
                    sgd_minibatch_size=config["sgd_minibatch_size"]))
        else:
            train_op = rollouts.for_each(
                MultiGPUTrainOneStep(
                    workers=workers,
                    sgd_minibatch_size=config["sgd_minibatch_size"],
                    num_sgd_iter=config["num_sgd_iter"],
                    num_gpus=config["num_gpus"],
                    _fake_gpus=config["_fake_gpus"]))

        # Update KL after each round of training.
        train_op = train_op.for_each(lambda t: t[1]).for_each(
            UpdateKL(workers))

        # Warn about bad reward scales and return training metrics.
        return StandardMetricsReporting(train_op, workers, config) \
            .for_each(lambda result: warn_about_bad_reward_scales(
                config, result))
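

# Example of training this PPOTrainer through Ray Tune (a sketch; assumes
# `ray[tune]` is installed; the stopping criterion and config values are
# illustrative only):
#
#     from ray import tune
#
#     tune.run(
#         "PPO",
#         stop={"episode_reward_mean": 150},
#         config={"env": "CartPole-v0", "num_workers": 2, "framework": "tf"},
#     )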