ray/rllib/utils/exploration/random.py

from gym.spaces import Discrete, MultiDiscrete, Tuple
from typing import Union

from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import try_import_tf, try_import_torch, \
    tf_function, TensorType
from ray.rllib.utils.tuple_actions import TupleActions
from ray.rllib.models.modelv2 import ModelV2
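
# NOTE: `try_import_tf()` returns None and `try_import_torch()` returns a
# (None, None) tuple when the respective framework is not installed, so this
# module imports cleanly with only one of tf/torch available.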
tf = try_import_tf()
torch, _ = try_import_torch()


class Random(Exploration):
    """A random action selector (deterministic/greedy for explore=False).

    If explore=True, returns actions randomly from `self.action_space` (via
    Space.sample()).
    If explore=False, returns the greedy/max-likelihood action.
    """

    def __init__(self, action_space, *, framework="tf", **kwargs):
        """Initialize a Random Exploration object.

        Args:
            action_space (Space): The gym action space used by the
                environment.
            framework (Optional[str]): One of None, "tf", "torch".
        """
        super().__init__(
            action_space=action_space, framework=framework, **kwargs)

        # Determine py_func types, depending on our action-space.
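        # E.g. Discrete/MultiDiscrete spaces (also as the first component of
        # a Tuple) are sampled as int64 and then cast to int32; all other
        # spaces (e.g. Box) are sampled as float64 and cast to float32.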
        if isinstance(self.action_space, (Discrete, MultiDiscrete)) or \
                (isinstance(self.action_space, Tuple) and
                 isinstance(self.action_space[0], (Discrete, MultiDiscrete))):
            self.dtype_sample, self.dtype = (tf.int64, tf.int32)
        else:
            self.dtype_sample, self.dtype = (tf.float64, tf.float32)

    @override(Exploration)
    def get_exploration_action(self,
                               distribution_inputs: TensorType,
                               action_dist_class: type,
                               model: ModelV2,
                               timestep: Union[int, TensorType],
                               explore: bool = True):
        # Instantiate the distribution object.
        action_dist = action_dist_class(distribution_inputs, model)

        if self.framework == "tf":
            return self.get_tf_exploration_action_op(action_dist, explore)
        else:
            return self.get_torch_exploration_action(action_dist, explore)

    @tf_function(tf)
    def get_tf_exploration_action_op(self, action_dist, explore):
        if explore:
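            # Sample via a py_function: calling `Space.sample()` directly
            # while the static graph is being built would bake the result in
            # as a constant, so every graph execution would yield the same
            # "random" action.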
            action = tf.py_function(self.action_space.sample, [],
                                    self.dtype_sample)
            # Will be unnecessary, once we support batch/time-aware Spaces.
            action = tf.expand_dims(tf.cast(action, dtype=self.dtype), 0)
        else:
            action = tf.cast(
                action_dist.deterministic_sample(), dtype=self.dtype)

        # TODO(sven): Move into (deterministic_)sample(logp=True|False).
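        # TupleActions holds a list of per-component action batches, so the
        # batch dimension must be read off the first component's tensor. The
        # returned logp is a dummy 0.0 per batch item (see TODO above).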
        if isinstance(action, TupleActions):
            batch_size = tf.shape(action[0][0])[0]
        else:
            batch_size = tf.shape(action)[0]
        logp = tf.zeros(shape=(batch_size, ), dtype=tf.float32)
        return action, logp

    def get_torch_exploration_action(self, action_dist, explore):
        tensor_fn = torch.LongTensor if \
            type(self.action_space) in [Discrete, MultiDiscrete] else \
            torch.FloatTensor
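        # NOTE: Unlike the tf path, Tuple action spaces are not special-cased
        # here; only plain Discrete/MultiDiscrete spaces map to LongTensor.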
        if explore:
            # Unsqueeze will be unnecessary, once we support batch/time-aware
            # Spaces.
            action = tensor_fn(self.action_space.sample()).unsqueeze(0)
        else:
            action = tensor_fn(action_dist.deterministic_sample())
        logp = torch.zeros((action.size()[0], ), dtype=torch.float32)
        return action, logp