import gym
from gym.spaces import Discrete, Tuple
import numpy as np

from ray.rllib.examples.env.multi_agent import make_multiagent


class RandomEnv(gym.Env):
    """A randomly acting environment.

    Can be instantiated with arbitrary action-, observation-, and reward
    spaces. Observations and rewards are generated by simply sampling from the
    observation/reward spaces. The probability of a `done=True` can be
    configured as well.
    """

    def __init__(self, config):
        # Action space.
        self.action_space = config.get("action_space", Discrete(2))
        # Observation space from which to sample.
        self.observation_space = config.get("observation_space", Discrete(2))
        # Reward space from which to sample.
        self.reward_space = config.get(
            "reward_space",
            gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32))
        # Chance that an episode ends at any step.
        self.p_done = config.get("p_done", 0.1)
        # Whether to check action bounds.
        self.check_action_bounds = config.get("check_action_bounds", False)

    def reset(self):
        return self.observation_space.sample()

    def step(self, action):
        if self.check_action_bounds and not self.action_space.contains(action):
            raise ValueError("Illegal action for {}: {}".format(
                self.action_space, action))
        if (isinstance(self.action_space, Tuple)
                and len(action) != len(self.action_space.spaces)):
            raise ValueError("Illegal action for {}: {}".format(
                self.action_space, action))

        # Sample a random observation and reward; terminate the episode with
        # probability `p_done`.
        return self.observation_space.sample(), \
            float(self.reward_space.sample()), \
            bool(np.random.choice(
                [True, False], p=[self.p_done, 1.0 - self.p_done]
            )), {}
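

# Usage sketch (hypothetical helper, not part of the upstream example): a
# minimal way to exercise RandomEnv, assuming only the config keys read in
# `__init__` above. The spaces and `p_done` value chosen here are arbitrary;
# any gym.spaces space should work.
def _random_env_demo():
    env = RandomEnv({
        "observation_space": gym.spaces.Box(
            -1.0, 1.0, shape=(3, ), dtype=np.float32),
        "action_space": Discrete(4),
        "p_done": 0.05,
    })
    obs = env.reset()
    done = False
    while not done:
        # Actions are ignored by RandomEnv, so a random sample is fine.
        obs, reward, done, info = env.step(env.action_space.sample())
    return obs, reward, done, info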


# Multi-agent version of the RandomEnv.
RandomMultiAgentEnv = make_multiagent(lambda c: RandomEnv(c))
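

# Multi-agent usage sketch (hypothetical helper): the "num_agents" key and the
# dict-keyed reset()/step() interface assumed here come from the
# make_multiagent wrapper in ray/rllib/examples/env/multi_agent.py; all other
# config keys are forwarded to each wrapped RandomEnv.
def _random_multi_agent_env_demo():
    env = RandomMultiAgentEnv({"num_agents": 2, "p_done": 0.1})
    obs = env.reset()
    # One joint step: every agent id in the observation dict gets a random
    # action from RandomEnv's default Discrete(2) action space.
    action_space = Discrete(2)
    actions = {agent_id: action_space.sample() for agent_id in obs}
    obs, rewards, dones, infos = env.step(actions)
    return obs, rewards, dones, infos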