ray/rllib/examples/env/random_env.py
Sven Mika 42991d723f
[RLlib] rllib/examples folder restructuring (#8250)
Cleans up the rllib/examples folder by moving all example Envs into rllib/examples/env (so they can be used by other scripts and tests as well).
2020-05-01 22:59:34 +02:00

45 lines
1.7 KiB
Python

import gym
from gym.spaces import Tuple
import numpy as np
class RandomEnv(gym.Env):
    """An environment that behaves entirely at random.

    The action-, observation-, and reward spaces are supplied via the
    `config` dict. Every `step` simply samples an observation and a reward
    from the respective spaces; an episode terminates with a configurable
    per-step probability. Useful as a stub/testing environment.
    """

    def __init__(self, config):
        # Space from which actions are expected (required key).
        self.action_space = config["action_space"]
        # Space from which observations are drawn (required key).
        self.observation_space = config["observation_space"]
        # Space from which rewards are drawn; defaults to a scalar
        # Box in [-1.0, 1.0].
        self.reward_space = config.get(
            "reward_space",
            gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32))
        # Per-step probability that the episode ends.
        self.p_done = config.get("p_done", 0.1)
        # If True, `step` rejects actions outside the action space.
        self.check_action_bounds = config.get("check_action_bounds", False)

    def reset(self):
        # The initial observation is just a random sample.
        return self.observation_space.sample()

    def step(self, action):
        # Optionally validate that the action lies within the action space.
        if self.check_action_bounds and not self.action_space.contains(action):
            raise ValueError("Illegal action for {}: {}".format(
                self.action_space, action))
        # A Tuple action must provide one component per sub-space.
        if (isinstance(self.action_space, Tuple)
                and len(action) != len(self.action_space.spaces)):
            raise ValueError("Illegal action for {}: {}".format(
                self.action_space, action))
        # Random observation and reward; episode ends with prob. `p_done`.
        obs = self.observation_space.sample()
        reward = float(self.reward_space.sample())
        done = bool(
            np.random.choice(
                [True, False], p=[self.p_done, 1.0 - self.p_done]))
        return obs, reward, done, {}