import logging

import numpy as np

from ray.rllib.utils.annotations import override, PublicAPI

logger = logging.getLogger(__name__)


@PublicAPI
class VectorEnv:
    """An environment that supports batch evaluation.

    Subclasses must define the following attributes:

    Attributes:
        action_space (gym.Space): Action space of individual envs.
        observation_space (gym.Space): Observation space of individual envs.
        num_envs (int): Number of envs in this vector env.
    """

    @staticmethod
    def wrap(make_env=None,
             existing_envs=None,
             num_envs=1,
             action_space=None,
             observation_space=None):
        return _VectorizedGymEnv(make_env, existing_envs or [], num_envs,
                                 action_space, observation_space)
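
    # Usage sketch (illustrative only; assumes the external `gym` package
    # and its registered "CartPole-v0" env):
    #
    #     vec_env = VectorEnv.wrap(
    #         make_env=lambda idx: gym.make("CartPole-v0"), num_envs=4)
    #     obs = vec_env.vector_reset()  # -> list of 4 initial observations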

    @PublicAPI
    def vector_reset(self):
        """Resets all environments.

        Returns:
            obs (list): Vector of observations from each environment.
        """
        raise NotImplementedError

    @PublicAPI
    def reset_at(self, index):
        """Resets a single environment.

        Returns:
            obs (obj): Observations from the reset environment.
        """
        raise NotImplementedError

    @PublicAPI
    def vector_step(self, actions):
        """Vectorized step.

        Arguments:
            actions (list): Actions for each env.

        Returns:
            obs (list): New observations for each env.
            rewards (list): Reward values for each env.
            dones (list): Done values for each env.
            infos (list): Info values for each env.
        """
        raise NotImplementedError
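
    # E.g., one synchronous rollout step across all sub-envs (hypothetical
    # driver code, assuming `vec_env` is a concrete VectorEnv):
    #
    #     actions = [vec_env.action_space.sample()
    #                for _ in range(vec_env.num_envs)]
    #     obs, rewards, dones, infos = vec_env.vector_step(actions)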

    @PublicAPI
    def get_unwrapped(self):
        """Returns the underlying env instances."""
        raise NotImplementedError
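
# A concrete VectorEnv must implement vector_reset, reset_at, vector_step
# and get_unwrapped. Minimal sketch (illustrative only; `MyEnv` is a
# hypothetical gym env):
#
#     class MyVectorEnv(VectorEnv):
#         def __init__(self, num_envs):
#             self.envs = [MyEnv() for _ in range(num_envs)]
#             self.action_space = self.envs[0].action_space
#             self.observation_space = self.envs[0].observation_space
#             self.num_envs = num_envs
#
#         def vector_reset(self):
#             return [e.reset() for e in self.envs]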


class _VectorizedGymEnv(VectorEnv):
    """Internal wrapper for gym envs to implement VectorEnv.

    Arguments:
        make_env (func|None): Factory that produces a new gym env. Must be
            defined if the number of existing envs is less than num_envs.
        existing_envs (list): List of existing gym envs.
        num_envs (int): Desired num gym envs to keep total.
    """

    def __init__(self,
                 make_env,
                 existing_envs,
                 num_envs,
                 action_space=None,
                 observation_space=None):
        self.make_env = make_env
        self.envs = existing_envs
        self.num_envs = num_envs
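        # Create any missing sub-envs via the factory so that self.envs
        # always holds exactly num_envs environments.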
        while len(self.envs) < self.num_envs:
            self.envs.append(self.make_env(len(self.envs)))
        self.action_space = action_space or self.envs[0].action_space
        self.observation_space = observation_space or \
            self.envs[0].observation_space

    @override(VectorEnv)
    def vector_reset(self):
        return [e.reset() for e in self.envs]

    @override(VectorEnv)
    def reset_at(self, index):
        return self.envs[index].reset()

    @override(VectorEnv)
    def vector_step(self, actions):
        obs_batch, rew_batch, done_batch, info_batch = [], [], [], []
        for i in range(self.num_envs):
            obs, r, done, info = self.envs[i].step(actions[i])
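            # Sanity-check env outputs: rewards must be finite scalars and
            # infos must be plain dicts.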
            if not np.isscalar(r) or not np.isreal(r) or not np.isfinite(r):
                raise ValueError(
                    "Reward should be finite scalar, got {} ({}). "
                    "Actions={}.".format(r, type(r), actions[i]))
            if type(info) is not dict:
                raise ValueError("Info should be a dict, got {} ({})".format(
                    info, type(info)))
            obs_batch.append(obs)
            rew_batch.append(r)
            done_batch.append(done)
            info_batch.append(info)
        return obs_batch, rew_batch, done_batch, info_batch

    @override(VectorEnv)
    def get_unwrapped(self):
        return self.envs