ray/rllib/evaluation/postprocessing.py


import numpy as np
import scipy.signal

from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import DeveloperAPI


def discount_cumsum(x: np.ndarray, gamma: float) -> np.ndarray:
    """Calculates the discounted cumulative sum over a reward sequence `x`.

    y[t] - discount*y[t+1] = x[t]
    reversed(y)[t] - discount*reversed(y)[t-1] = reversed(x)[t]

    Args:
        x (np.ndarray): The sequence of rewards to sum up.
        gamma (float): The discount factor gamma.

    Returns:
        np.ndarray: The discounted cumulative sum over the reward sequence `x`.
    """
    # Filtering the reversed sequence realizes the recursion
    # y[t] = x[t] + gamma * y[t + 1] in a single vectorized pass.
    return scipy.signal.lfilter([1], [1, float(-gamma)], x[::-1], axis=0)[::-1]
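

# A minimal worked example (toy numbers for illustration): for
# x = [1.0, 1.0, 1.0] and gamma = 0.5, the recursion y[t] = x[t] + gamma * y[t + 1]
# gives y = [1.75, 1.5, 1.0]:
#   y[2] = 1.0
#   y[1] = 1.0 + 0.5 * 1.0 = 1.5
#   y[0] = 1.0 + 0.5 * 1.5 = 1.75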


class Postprocessing:
    """Constant definitions for postprocessing."""

    ADVANTAGES = "advantages"
    VALUE_TARGETS = "value_targets"


@DeveloperAPI
def compute_advantages(rollout: SampleBatch,
                       last_r: float,
                       gamma: float = 0.9,
                       lambda_: float = 1.0,
                       use_gae: bool = True,
                       use_critic: bool = True):
    """Given a rollout, compute its value targets and the advantages.

    Args:
        rollout (SampleBatch): SampleBatch of a single trajectory.
        last_r (float): Value estimation for the last observation.
        gamma (float): Discount factor.
        lambda_ (float): Parameter for GAE.
        use_gae (bool): Whether to use Generalized Advantage Estimation.
        use_critic (bool): Whether to use critic (value estimates). Setting
            this to False will use 0 as baseline.

    Returns:
        SampleBatch: Object with experience from rollout and
            processed rewards.
    """

    assert SampleBatch.VF_PREDS in rollout or not use_critic, \
        "use_critic=True but values not found"
    assert use_critic or not use_gae, \
        "Can't use gae without using a value function"

    if use_gae:
        # Append the bootstrap value `last_r` so that vpred_t[t + 1] is
        # defined for every timestep of the trajectory.
        vpred_t = np.concatenate(
            [rollout[SampleBatch.VF_PREDS],
             np.array([last_r])])
        # TD residuals: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t).
        delta_t = (
            rollout[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1])
        # This formula for the advantage comes from:
        # "Generalized Advantage Estimation": https://arxiv.org/abs/1506.02438
        rollout[Postprocessing.ADVANTAGES] = discount_cumsum(
            delta_t, gamma * lambda_)
        rollout[Postprocessing.VALUE_TARGETS] = (
            rollout[Postprocessing.ADVANTAGES] +
            rollout[SampleBatch.VF_PREDS]).astype(np.float32)
    else:
        rewards_plus_v = np.concatenate(
            [rollout[SampleBatch.REWARDS],
             np.array([last_r])])
        discounted_returns = discount_cumsum(rewards_plus_v,
                                             gamma)[:-1].astype(np.float32)

        if use_critic:
            rollout[Postprocessing.ADVANTAGES] = (
                discounted_returns - rollout[SampleBatch.VF_PREDS])
            rollout[Postprocessing.VALUE_TARGETS] = discounted_returns
        else:
            rollout[Postprocessing.ADVANTAGES] = discounted_returns
            rollout[Postprocessing.VALUE_TARGETS] = np.zeros_like(
                rollout[Postprocessing.ADVANTAGES])

    rollout[Postprocessing.ADVANTAGES] = rollout[
        Postprocessing.ADVANTAGES].astype(np.float32)

    return rollout
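

# A rough usage sketch with made-up numbers (assumes SampleBatch accepts a
# dict of equal-length columns, which is how RLlib builds trajectory batches):
# construct a tiny trajectory with rewards and value predictions, then attach
# GAE advantages and value targets to it.
if __name__ == "__main__":
    toy_batch = SampleBatch({
        SampleBatch.REWARDS: np.array([1.0, 0.0, 1.0]),
        SampleBatch.VF_PREDS: np.array([0.5, 0.4, 0.9]),
    })
    toy_batch = compute_advantages(
        toy_batch, last_r=0.0, gamma=0.99, lambda_=0.95)
    print(toy_batch[Postprocessing.ADVANTAGES])
    print(toy_batch[Postprocessing.VALUE_TARGETS])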