"""Proximal Policy Optimization (PPO) Trainer.

Defines the default PPO config, the policy optimizer choice, the adaptive KL
update hook, and config validation, and assembles them into PPOTrainer via
build_trainer().
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging

from ray.rllib.agents import with_common_config
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.optimizers import SyncSamplesOptimizer, LocalMultiGPUOptimizer
from ray.rllib.utils import try_import_tf

tf = try_import_tf()

logger = logging.getLogger(__name__)

# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # If true, use the Generalized Advantage Estimator (GAE)
    # with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
    "use_gae": True,
    # GAE(lambda) parameter
    "lambda": 1.0,
    # Initial coefficient for KL divergence
    "kl_coeff": 0.2,
    # Size of batches collected from each worker
    "sample_batch_size": 200,
    # Number of timesteps collected for each SGD round
    "train_batch_size": 4000,
    # Total SGD batch size across all devices
    "sgd_minibatch_size": 128,
    # Whether to shuffle sequences in the batch when training (recommended)
    "shuffle_sequences": True,
    # Number of SGD iterations in each outer loop
    "num_sgd_iter": 30,
    # Step size of SGD
    "lr": 5e-5,
    # Learning rate schedule
    "lr_schedule": None,
    # Share layers for the value function. If you set this to True, it's
    # important to tune vf_loss_coeff.
    "vf_share_layers": False,
    # Coefficient of the value function loss. It's important to tune this if
    # you set vf_share_layers: True.
    "vf_loss_coeff": 1.0,
    # Coefficient of the entropy regularizer
    "entropy_coeff": 0.0,
    # Decay schedule for the entropy regularizer
    "entropy_coeff_schedule": None,
    # PPO clip parameter
    "clip_param": 0.3,
    # Clip param for the value function. Note that this is sensitive to the
    # scale of the rewards. If your expected V is large, increase this.
    "vf_clip_param": 10.0,
    # If specified, clip the global norm of gradients by this amount
    "grad_clip": None,
    # Target value for KL divergence
    "kl_target": 0.01,
    # Whether to roll out "complete_episodes" or "truncate_episodes"
    "batch_mode": "truncate_episodes",
    # Which observation filter to apply to the observation
    "observation_filter": "NoFilter",
    # Use the sync samples optimizer instead of the multi-gpu one. This does
    # not support minibatches.
    "simple_optimizer": False,
})
# __sphinx_doc_end__
# yapf: enable
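
# Example (illustrative): these defaults are typically copied and selectively
# overridden before being passed to PPOTrainer, e.g.:
#
#     config = DEFAULT_CONFIG.copy()
#     config["lr"] = 1e-4
#     config["train_batch_size"] = 8000
#     trainer = PPOTrainer(config=config, env="CartPole-v0")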


def choose_policy_optimizer(workers, config):
    """Select the policy optimizer implied by the trainer config."""
    if config["simple_optimizer"]:
        return SyncSamplesOptimizer(
            workers,
            num_sgd_iter=config["num_sgd_iter"],
            train_batch_size=config["train_batch_size"],
            sgd_minibatch_size=config["sgd_minibatch_size"])

    return LocalMultiGPUOptimizer(
        workers,
        sgd_batch_size=config["sgd_minibatch_size"],
        num_sgd_iter=config["num_sgd_iter"],
        num_gpus=config["num_gpus"],
        sample_batch_size=config["sample_batch_size"],
        num_envs_per_worker=config["num_envs_per_worker"],
        train_batch_size=config["train_batch_size"],
        standardize_fields=["advantages"],
        shuffle_sequences=config["shuffle_sequences"])
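
# Illustrative note: with the defaults above, each training iteration gathers
# train_batch_size=4000 timesteps, and the multi-GPU optimizer then makes
# num_sgd_iter=30 passes over that batch in minibatches of
# sgd_minibatch_size=128 (roughly 4000 / 128 ~= 31 minibatches per pass).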


def update_kl(trainer, fetches):
    """Adapt each policy's KL coefficient based on the sampled KL."""
    if "kl" in fetches:
        # single-agent
        trainer.workers.local_worker().for_policy(
            lambda pi: pi.update_kl(fetches["kl"]))
    else:

        def update(pi, pi_id):
            if pi_id in fetches:
                pi.update_kl(fetches[pi_id]["kl"])
            else:
                logger.debug("No data for {}, not updating kl".format(pi_id))

        # multi-agent
        trainer.workers.local_worker().foreach_trainable_policy(update)
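
# Illustrative note: PPOTFPolicy.update_kl() is expected to adapt kl_coeff
# toward kl_target, roughly along these lines (see ppo_policy.py for the
# actual rule):
#
#     if sampled_kl > 2.0 * kl_target:
#         kl_coeff *= 1.5
#     elif sampled_kl < 0.5 * kl_target:
#         kl_coeff *= 0.5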


def warn_about_bad_reward_scales(trainer, result):
    """Warn if the reward scale is far larger than vf_clip_param."""
    if trainer.config["vf_clip_param"] <= 0:
        rew_scale = float("inf")
    elif result["policy_reward_mean"]:
        rew_scale = 0  # punt on handling multiagent case
    else:
        rew_scale = round(
            abs(result["episode_reward_mean"]) /
            trainer.config["vf_clip_param"], 0)
    if rew_scale > 200:
        logger.warning(
            "The magnitude of your environment rewards is more than "
            "{}x the scale of `vf_clip_param`. ".format(rew_scale) +
            "This means that it will take more than "
            "{} iterations for your value ".format(rew_scale) +
            "function to converge. If this is not intended, consider "
            "increasing `vf_clip_param`.")


def validate_config(config):
    """Check the PPO config for unsupported or inconsistent settings."""
    if config["entropy_coeff"] < 0:
        raise ValueError("entropy_coeff must be >= 0")
    if config["sgd_minibatch_size"] > config["train_batch_size"]:
        raise ValueError(
            "Minibatch size {} must be <= train batch size {}.".format(
                config["sgd_minibatch_size"], config["train_batch_size"]))
    if config["batch_mode"] == "truncate_episodes" and not config["use_gae"]:
        raise ValueError(
            "Episode truncation is not supported without a value "
            "function. Consider setting batch_mode=complete_episodes.")
    if config["multiagent"]["policies"] and not config["simple_optimizer"]:
        logger.info(
            "In multi-agent mode, policies will be optimized sequentially "
            "by the multi-GPU optimizer. Consider setting "
            "simple_optimizer=True if this doesn't work for you.")
    if config["simple_optimizer"]:
        logger.warning(
            "Using the simple minibatch optimizer. This will significantly "
            "reduce performance; consider simple_optimizer=False.")
    elif tf and tf.executing_eagerly():
        config["simple_optimizer"] = True  # multi-gpu not supported


PPOTrainer = build_trainer(
    name="PPO",
    default_config=DEFAULT_CONFIG,
    default_policy=PPOTFPolicy,
    make_policy_optimizer=choose_policy_optimizer,
    validate_config=validate_config,
    after_optimizer_step=update_kl,
    after_train_result=warn_about_bad_reward_scales)
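

# Illustrative usage sketch (not a definitive entry point): assumes ray,
# ray[rllib], and gym's CartPole-v0 are installed; runs only when this file
# is executed directly.
if __name__ == "__main__":
    import ray

    ray.init()
    example_config = DEFAULT_CONFIG.copy()
    example_config["num_workers"] = 1
    example_config["train_batch_size"] = 4000
    trainer = PPOTrainer(config=example_config, env="CartPole-v0")
    for _ in range(3):
        result = trainer.train()
        print("episode_reward_mean:", result["episode_reward_mean"])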