import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2

cv2.ocl.setUseOpenCL(False)


def is_atari(env):
    """Returns True if env appears to be an Atari (ALE) environment."""
    if (hasattr(env.observation_space, "shape")
            and env.observation_space.shape is not None
            and len(env.observation_space.shape) <= 2):
        return False
    return hasattr(env, "unwrapped") and hasattr(env.unwrapped, "ale")


def get_wrapper_by_cls(env, cls):
    """Returns the gym env wrapper of the given class, or None."""
    currentenv = env
    while True:
        if isinstance(currentenv, cls):
            return currentenv
        elif isinstance(currentenv, gym.Wrapper):
            currentenv = currentenv.env
        else:
            return None
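

# Example (a sketch): once an env has been built with wrap_deepmind() below,
# a particular wrapper can be pulled back out of the stack, e.g.:
#     monitor = get_wrapper_by_cls(env, MonitorEnv)
#     episode_rewards = monitor.get_episode_rewards()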


class MonitorEnv(gym.Wrapper):
    def __init__(self, env=None):
        """Record episode stats prior to EpisodicLifeEnv, etc."""
        gym.Wrapper.__init__(self, env)
        self._current_reward = None
        self._num_steps = None
        self._total_steps = None
        self._episode_rewards = []
        self._episode_lengths = []
        self._num_episodes = 0
        self._num_returned = 0

    def reset(self, **kwargs):
        obs = self.env.reset(**kwargs)

        if self._total_steps is None:
            self._total_steps = sum(self._episode_lengths)

        if self._current_reward is not None:
            self._episode_rewards.append(self._current_reward)
            self._episode_lengths.append(self._num_steps)
            self._num_episodes += 1

        self._current_reward = 0
        self._num_steps = 0

        return obs

    def step(self, action):
        obs, rew, done, info = self.env.step(action)
        self._current_reward += rew
        self._num_steps += 1
        self._total_steps += 1
        return (obs, rew, done, info)

    def get_episode_rewards(self):
        return self._episode_rewards

    def get_episode_lengths(self):
        return self._episode_lengths

    def get_total_steps(self):
        return self._total_steps

    def next_episode_results(self):
        for i in range(self._num_returned, len(self._episode_rewards)):
            yield (self._episode_rewards[i], self._episode_lengths[i])
        self._num_returned = len(self._episode_rewards)


class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.

        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == "NOOP"

    def reset(self, **kwargs):
        """Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)


class ClipRewardEnv(gym.RewardWrapper):
    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)


class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset.

        For environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == "FIRE"
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)


class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.

        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        self.was_real_done = True

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few
            # frames, so it's important to keep lives > 0, so that we only
            # reset once the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info

    def reset(self, **kwargs):
        """Reset only when lives are exhausted.

        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs


class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros(
            (2, ) + env.observation_space.shape, dtype=np.uint8)
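        # Only the last two raw frames are buffered; step() takes their
        # element-wise max, which smooths over the sprite flicker some Atari
        # games show on alternating frames.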
        self._skip = skip

    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            if i == self._skip - 2:
                self._obs_buffer[0] = obs
            if i == self._skip - 1:
                self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)

        return max_frame, total_reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)


class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, dim):
        """Warp frames to the specified size (dim x dim)."""
        gym.ObservationWrapper.__init__(self, env)
        self.width = dim
        self.height = dim
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(self.height, self.width, 1),
            dtype=np.uint8)
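        # The wrapped observation is a single-channel (dim, dim, 1) uint8
        # image; observation() below converts the raw RGB frame (typically
        # 210x160x3 for ALE games) to grayscale and resizes it.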

    def observation(self, frame):
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        return frame[:, :, None]


# TODO: (sven) Deprecated class. Remove once traj. view is the norm.
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames."""
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(shp[0], shp[1], shp[2] * k),
            dtype=env.observation_space.dtype)
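        # Frames are stacked along the channel axis, so with the usual
        # (84, 84, 1) WarpFrame output and k=4 the observations become
        # (84, 84, 4).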

    def reset(self):
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.k
        return np.concatenate(self.frames, axis=2)


class FrameStackTrajectoryView(gym.ObservationWrapper):
    def __init__(self, env):
        """No stacking. Trajectory View API takes care of this."""
        gym.ObservationWrapper.__init__(self, env)
        shp = env.observation_space.shape
        assert shp[2] == 1
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(shp[0], shp[1]),
            dtype=env.observation_space.dtype)

    def observation(self, observation):
        return np.squeeze(observation, axis=-1)


class ScaledFloatFrame(gym.ObservationWrapper):
    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(
            low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)

    def observation(self, observation):
        # careful! This undoes the memory optimization, use
        # with smaller replay buffers only.
        return np.array(observation).astype(np.float32) / 255.0


def wrap_deepmind(
        env,
        dim=84,
        # TODO: (sven) Remove once traj. view is norm.
        framestack=True,
        framestack_via_traj_view_api=False):
    """Configure environment for DeepMind-style Atari.

    Note that we assume reward clipping is done outside the wrapper.

    Args:
        dim (int): Dimension to resize observations to (dim x dim).
        framestack (bool): Whether to framestack observations.
        framestack_via_traj_view_api (bool): Whether to leave framestacking
            to the trajectory view API instead of the FrameStack wrapper.
    """
    env = MonitorEnv(env)
    env = NoopResetEnv(env, noop_max=30)
    if env.spec is not None and "NoFrameskip" in env.spec.id:
        env = MaxAndSkipEnv(env, skip=4)
    env = EpisodicLifeEnv(env)
    if "FIRE" in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env, dim)
    # env = ScaledFloatFrame(env)  # TODO: use for dqn?
    # env = ClipRewardEnv(env)  # reward clipping is handled by policy eval
    # New way of frame stacking via the trajectory view API (model config key:
    # `num_framestacks=[int]`).
    if framestack_via_traj_view_api:
        env = FrameStackTrajectoryView(env)
    # Old way (w/o traj. view API) via model config key: `framestack=True`.
    # TODO: (sven) Remove once traj. view is norm.
    elif framestack is True:
        env = FrameStack(env, 4)
    return env
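

# Example usage (a minimal sketch): build the full DeepMind-style wrapper
# stack around a raw Atari env and inspect the observation shape. Assumes
# gym's Atari dependencies are installed; the env id below is illustrative.
if __name__ == "__main__":
    env = wrap_deepmind(gym.make("PongNoFrameskip-v4"), dim=84)
    obs = env.reset()
    print("observation shape:", obs.shape)  # (84, 84, 4) with framestack=True
    obs, reward, done, info = env.step(env.action_space.sample())
    print("one-step reward:", reward)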