ray/rllib/utils/exploration/epsilon_greedy.py
Sven Mika 428516056a
[RLlib] SAC Torch (incl. Atari learning) (#7984)
* Policy-classes cleanup and torch/tf unification.
- Make Policy abstract.
- Add `action_dist` to call to `extra_action_out_fn` (necessary for PPO torch).
- Move some methods and vars to base Policy
  (from TFPolicy): num_state_tensors, ACTION_PROB, ACTION_LOGP and some more.

* Fix `clip_action` import from Policy (should probably be moved into utils altogether).

* - Move `is_recurrent()` and `num_state_tensors()` into TFPolicy (from DynamicTFPolicy).
- Add config to all Policy c'tor calls (as 3rd arg after obs and action spaces).

* Add `config` to c'tor call to TFPolicy.

* Add missing `config` to c'tor call to TFPolicy in marwil_policy.py.

* Fix test_rollout_worker.py::MockPolicy and BadPolicy classes (Policy base class is now abstract).

* Fix LINT errors in Policy classes.

* Implement StatefulPolicy abstract methods in test cases: test_multi_agent_env.py.

* policy.py LINT errors.

* Create a simple TestPolicy to sub-class from when testing Policies (reduces code in some test cases).

* policy.py
- Remove abstractmethod from `apply_gradients` and `compute_gradients` (these are not required if `learn_on_batch` is implemented).
- Fix docstring of `num_state_tensors`.

* Make QMIX torch Policy a child of TorchPolicy (instead of Policy).

* QMixPolicy add empty implementations of abstract Policy methods.

* Store Policy's config in self.config in base Policy c'tor.

* - Make only `compute_actions` an abstractmethod in the base Policy and provide pass
implementations for all other methods if they are not overridden (see the Policy sketch below this commit message).
- Fix state_batches=None (most Policies don't have internal states).

* Cartpole tf learning.

* Cartpole tf AND torch learning (in ~ same ts); several follow-up iterations (torch syntax temporarily broken in one of them).

* Cartpole tf AND torch learning (in ~ same ts). Pendulum tf learning.

* WIP.

* SAC torch learning Pendulum.

* WIP.

* SAC torch and tf learning Pendulum and Cartpole after cleanup.

* WIP.

* LINT.

* SAC: Move policy.target_model to policy.device as well.

* Fixes and cleanup.

* Fix data-format of tf keras Conv2d layers (broken for some tf-versions which have data_format="channels_first" as default).

* Fixes and LINT.

* Fix and LINT.

* WIP.

* Test fixes and LINT.

* Fixes and LINT.

Co-authored-by: Sven Mika <sven@Svens-MacBook-Pro.local>
2020-04-15 13:25:16 +02:00
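
To make the new contract concrete, here is a minimal, hypothetical sketch of a user-defined Policy under this refactor. Only `compute_actions` has to be overridden; `config` is passed as the third constructor argument and stored by the base class; everything else falls back to the base class's pass implementations. The class name `RandomPolicy` is illustrative only, and the import path and the `(actions, state_outs, info)` return convention are assumed from the RLlib version of this commit.

from ray.rllib.policy.policy import Policy


class RandomPolicy(Policy):
    """Hypothetical minimal Policy: only `compute_actions` is overridden."""

    def __init__(self, observation_space, action_space, config):
        # `config` is now the 3rd positional c'tor arg and is stored in
        # `self.config` by the base Policy c'tor.
        super().__init__(observation_space, action_space, config)

    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        **kwargs):
        # Return (actions, RNN state-outs, extra fetches). With
        # state_batches=None, there are no internal states to pass back.
        return [self.action_space.sample() for _ in obs_batch], [], {}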

155 lines · 6 KiB · Python

from typing import Union

from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.exploration import Exploration, TensorType
from ray.rllib.utils.framework import try_import_tf, try_import_torch, \
    get_variable
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.schedules import Schedule, PiecewiseSchedule

tf = try_import_tf()
torch, _ = try_import_torch()


class EpsilonGreedy(Exploration):
    """Epsilon-greedy Exploration class that produces exploration actions.

    When given a Model's output and a current epsilon value (based on some
    Schedule), it produces a random action (if rand(1) < eps) or
    uses the model-computed one (if rand(1) >= eps).
    """

    def __init__(self,
                 action_space,
                 *,
                 framework: str,
                 initial_epsilon=1.0,
                 final_epsilon=0.05,
                 epsilon_timesteps=int(1e5),
                 epsilon_schedule=None,
                 **kwargs):
        """Create an EpsilonGreedy exploration class.

        Args:
            initial_epsilon (float): The initial epsilon value to use.
            final_epsilon (float): The final epsilon value to use.
            epsilon_timesteps (int): The time step after which epsilon should
                always be `final_epsilon`.
            epsilon_schedule (Optional[Schedule]): An optional Schedule object
                to use (instead of constructing one from the given
                parameters).
        """
        assert framework is not None
        super().__init__(
            action_space=action_space, framework=framework, **kwargs)

        self.epsilon_schedule = \
            from_config(Schedule, epsilon_schedule, framework=framework) or \
            PiecewiseSchedule(
                endpoints=[
                    (0, initial_epsilon), (epsilon_timesteps, final_epsilon)],
                outside_value=final_epsilon,
                framework=self.framework)

        # The current timestep value (tf-var or python int).
        self.last_timestep = get_variable(
            0, framework=framework, tf_name="timestep")

    @override(Exploration)
    def get_exploration_action(self,
                               *,
                               action_distribution: ActionDistribution,
                               timestep: Union[int, TensorType],
                               explore: bool = True):
        q_values = action_distribution.inputs
        if self.framework == "tf":
            return self._get_tf_exploration_action_op(q_values, explore,
                                                      timestep)
        else:
            return self._get_torch_exploration_action(q_values, explore,
                                                      timestep)

    def _get_tf_exploration_action_op(self, q_values, explore, timestep):
        """TF method to produce the tf op for an epsilon exploration action.

        Args:
            q_values (Tensor): The Q-values coming from some Q-model.
            explore (Union[bool, TensorType]): Whether exploration is enabled.
            timestep (Union[int, TensorType]): The current sampling timestep.

        Returns:
            Tuple[tf.Tensor, tf.Tensor]: The tf exploration-action op and an
                all-zeros action-logp op.
        """
        epsilon = self.epsilon_schedule(timestep if timestep is not None else
                                        self.last_timestep)

        # Get the exploit action as the one with the highest logit value.
        exploit_action = tf.argmax(q_values, axis=1)

        batch_size = tf.shape(q_values)[0]
        # Mask out actions with q-value=-inf so that we don't even consider
        # them for exploration.
        random_valid_action_logits = tf.where(
            tf.equal(q_values, tf.float32.min),
            tf.ones_like(q_values) * tf.float32.min, tf.ones_like(q_values))
        random_actions = tf.squeeze(
            tf.multinomial(random_valid_action_logits, 1), axis=1)

        chose_random = tf.random_uniform(
            tf.stack([batch_size]), minval=0, maxval=1,
            dtype=tf.float32) < epsilon

        action = tf.cond(
            pred=tf.constant(explore, dtype=tf.bool)
            if isinstance(explore, bool) else explore,
            true_fn=(
                lambda: tf.where(chose_random, random_actions, exploit_action)
            ),
            false_fn=lambda: exploit_action)

        # Update `self.last_timestep` before returning the action op.
        assign_op = tf.assign(self.last_timestep, timestep)
        with tf.control_dependencies([assign_op]):
            return action, tf.zeros_like(action, dtype=tf.float32)

    def _get_torch_exploration_action(self, q_values, explore, timestep):
        """Torch method to produce an epsilon exploration action.

        Args:
            q_values (Tensor): The Q-values coming from some Q-model.
            explore (bool): Whether exploration is enabled.
            timestep (int): The current sampling timestep.

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: The exploration action and an
                all-zeros action-logp tensor.
        """
        self.last_timestep = timestep
        _, exploit_action = torch.max(q_values, 1)
        action_logp = torch.zeros_like(exploit_action)

        # Explore.
        if explore:
            # Get the current epsilon.
            epsilon = self.epsilon_schedule(self.last_timestep)
            batch_size = q_values.size()[0]
            # Mask out actions whose Q-values are -inf, so that we don't
            # even consider them for exploration.
            random_valid_action_logits = torch.where(
                q_values == float("-inf"),
                torch.ones_like(q_values) * float("-inf"),
                torch.ones_like(q_values))
            # A random action.
            random_actions = torch.squeeze(
                torch.multinomial(random_valid_action_logits, 1), axis=1)
            # Pick either random or greedy.
            action = torch.where(
                torch.empty(
                    (batch_size, )).uniform_().to(self.device) < epsilon,
                random_actions, exploit_action)

            return action, action_logp
        # Return the deterministic "sample" (argmax) over the logits.
        else:
            return exploit_action, action_logp

    @override(Exploration)
    def get_info(self):
        eps = self.epsilon_schedule(self.last_timestep)
        return {"cur_epsilon": eps}