2017-12-14 01:08:23 -08:00
|
|
|
import numpy as np
|
|
|
|
import scipy.signal
|
2019-05-20 16:46:05 -07:00
|
|
|
from ray.rllib.policy.sample_batch import SampleBatch
|
2019-01-23 21:27:26 -08:00
|
|
|
from ray.rllib.utils.annotations import DeveloperAPI
|
2017-12-14 01:08:23 -08:00
|
|
|
|
|
|
|
|
|
|
|
def discount(x, gamma):
    """Compute the discounted cumulative sum over the first axis of `x`.

    out[t] = x[t] + gamma * out[t + 1], with the value past the end
    treated as 0.
    """
    # Reverse along time, run a first-order IIR filter
    # (y[n] = x[n] + gamma * y[n - 1]), then reverse back so the
    # accumulation effectively runs from the end of the trajectory.
    reversed_x = x[::-1]
    filtered = scipy.signal.lfilter([1], [1, -gamma], reversed_x, axis=0)
    return filtered[::-1]
|
|
|
|
|
|
|
|
|
2020-01-02 17:42:13 -08:00
|
|
|
class Postprocessing:
    """Constant definitions for postprocessing."""

    # SampleBatch key under which compute_advantages() stores the
    # per-timestep advantages (GAE or discounted-return based).
    ADVANTAGES = "advantages"
    # SampleBatch key under which compute_advantages() stores the
    # value-function regression targets.
    VALUE_TARGETS = "value_targets"
|
|
|
|
|
|
|
|
|
2019-01-23 21:27:26 -08:00
|
|
|
@DeveloperAPI
def compute_advantages(rollout, last_r, gamma=0.9, lambda_=1.0, use_gae=True):
    """Given a rollout, compute its value targets and the advantage.

    Args:
        rollout (SampleBatch): SampleBatch of a single trajectory.
        last_r (float): Value estimation for last observation.
        gamma (float): Discount factor.
        lambda_ (float): Parameter for GAE.
        use_gae (bool): Using Generalized Advantage Estimation.

    Returns:
        SampleBatch (SampleBatch): Object with experience from rollout and
            processed rewards (ADVANTAGES and VALUE_TARGETS columns added).
    """
    traj = {}
    trajsize = len(rollout[SampleBatch.ACTIONS])
    # Stack each column of the rollout into a single contiguous ndarray.
    for key in rollout:
        traj[key] = np.stack(rollout[key])

    if use_gae:
        assert SampleBatch.VF_PREDS in rollout, "Values not found!"
        # Append the bootstrap value `last_r` so vpred_t[t + 1] exists for
        # every timestep t of the trajectory.
        vpred_t = np.concatenate(
            [rollout[SampleBatch.VF_PREDS],
             np.array([last_r])])
        # One-step TD errors: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t).
        delta_t = (
            traj[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1])
        # This formula for the advantage comes from:
        # "Generalized Advantage Estimation": https://arxiv.org/abs/1506.02438
        traj[Postprocessing.ADVANTAGES] = discount(delta_t, gamma * lambda_)
        # Value targets = advantages + value predictions, cast to float32
        # for the learner.
        traj[Postprocessing.VALUE_TARGETS] = (
            traj[Postprocessing.ADVANTAGES] +
            traj[SampleBatch.VF_PREDS]).copy().astype(np.float32)
    else:
        # No GAE: advantages are plain discounted returns, bootstrapped
        # with `last_r` past the end of the trajectory.
        rewards_plus_v = np.concatenate(
            [rollout[SampleBatch.REWARDS],
             np.array([last_r])])
        traj[Postprocessing.ADVANTAGES] = discount(rewards_plus_v, gamma)[:-1]
        # TODO(ekl): support using a critic without GAE
        traj[Postprocessing.VALUE_TARGETS] = np.zeros_like(
            traj[Postprocessing.ADVANTAGES])

    # Cast advantages (from either branch) to float32 in one place.
    traj[Postprocessing.ADVANTAGES] = traj[
        Postprocessing.ADVANTAGES].copy().astype(np.float32)

    # Sanity check: every column must keep the original trajectory length.
    assert all(val.shape[0] == trajsize for val in traj.values()), \
        "Rollout stacked incorrectly!"
    return SampleBatch(traj)
|