"""Wrap Google's RecSim environment for RLlib.

RecSim is a configurable recommender systems simulation platform.

Source: https://github.com/google-research/recsim
"""

from collections import OrderedDict
from typing import List

import gym
import numpy as np
from gym import spaces
from recsim.environments import interest_evolution

from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.spaces.space_utils import convert_element_to_space_type
from ray.tune.registry import register_env


class RecSimObservationSpaceWrapper(gym.ObservationWrapper):
    """Fix the RecSim environment's observation space.

    In RecSim's observation space, the "doc" field is a dictionary keyed by
    document IDs. Those IDs change at every step, generating a different
    observation space each time. This causes issues for RLlib because it
    expects the observation space to remain the same across steps.

    This environment wrapper fixes that by re-indexing the documents by
    their positions in the list.
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)
        obs_space = self.env.observation_space
        # Re-key the "doc" sub-space by list position instead of by
        # (volatile) document ID.
        doc_space = spaces.Dict(
            OrderedDict(
                [(str(k), doc)
                 for k, (_,
                         doc) in enumerate(obs_space["doc"].spaces.items())]))
        self.observation_space = spaces.Dict(
            OrderedDict([
                ("user", obs_space["user"]),
                ("doc", doc_space),
                ("response", obs_space["response"]),
            ]))
        # Sample once so `observation()` can cast incoming observations to
        # the dtypes expected by the (fixed) observation space.
        self._sampled_obs = self.observation_space.sample()

    def observation(self, obs):
        new_obs = OrderedDict()
        new_obs["user"] = obs["user"]
        # Replace the volatile document-ID keys with stable positional keys.
        new_obs["doc"] = {
            str(k): v
            for k, (_, v) in enumerate(obs["doc"].items())
        }
        new_obs["response"] = obs["response"]
        new_obs = convert_element_to_space_type(new_obs, self._sampled_obs)
        return new_obs
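
# Illustrative sketch (hypothetical document IDs, not part of the original
# module): the wrapper above re-keys the "doc" field from volatile document
# IDs to stable list positions:
#
#     raw_obs["doc"]     == {"f8e7": doc_a, "91c2": doc_b}  # IDs change
#     wrapped_obs["doc"] == {"0": doc_a, "1": doc_b}        # positions stay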


class RecSimResetWrapper(gym.Wrapper):
    """Fix the RecSim environment's reset() and close() functions.

    RecSim's reset() returns an observation without the "response" field,
    breaking RLlib's observation check. This wrapper fixes that by assigning
    a random "response".

    RecSim's close() raises NotImplementedError. We change the behavior to
    do nothing instead.
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)
        self._sampled_obs = self.env.observation_space.sample()

    def reset(self):
        obs = super().reset()
        # RecSim's reset() omits the "response" field; fill it with a random
        # sample so the observation matches the observation space.
        obs["response"] = self.env.observation_space["response"].sample()
        obs = convert_element_to_space_type(obs, self._sampled_obs)
        return obs

    def close(self):
        # RecSim's close() raises NotImplementedError; do nothing instead.
        pass
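
# Illustrative sketch (with a hypothetical `raw_recsim_env`): after wrapping,
# reset() returns an observation that already contains a "response" field,
# so RLlib's space check passes:
#
#     env = RecSimResetWrapper(raw_recsim_env)
#     obs = env.reset()
#     assert "response" in obs  # filled with a random sample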


class MultiDiscreteToDiscreteActionWrapper(gym.ActionWrapper):
    """Convert the action space from MultiDiscrete to Discrete.

    At this moment, RLlib's DQN algorithms only work on Discrete action
    spaces. This wrapper allows us to apply DQN algorithms to the RecSim
    environment.
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)

        if not isinstance(env.action_space, spaces.MultiDiscrete):
            raise UnsupportedSpaceException(
                f"Action space {env.action_space} "
                f"is not supported by {self.__class__.__name__}")
        self.action_space_dimensions = env.action_space.nvec
        # Flatten the MultiDiscrete space into one Discrete space of size
        # prod(nvec).
        self.action_space = spaces.Discrete(
            np.prod(self.action_space_dimensions))

    def action(self, action: int) -> List[int]:
        """Convert a Discrete action back into a MultiDiscrete action."""
        multi_action = [None] * len(self.action_space_dimensions)
        # Decode the flat action as a mixed-radix number, one digit per
        # sub-action dimension.
        for idx, n in enumerate(self.action_space_dimensions):
            action, dim_action = divmod(action, n)
            multi_action[idx] = dim_action
        return multi_action
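
# Worked example (illustrative, not part of the original module): with
# `nvec == [3, 4]`, the wrapper exposes Discrete(12). Flat action 7 decodes
# via repeated divmod:
#
#     divmod(7, 3) == (2, 1)  -> first sub-action 1, carry 2
#     divmod(2, 4) == (0, 2)  -> second sub-action 2
#
# yielding the MultiDiscrete action [1, 2].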


def make_recsim_env(config):
    DEFAULT_ENV_CONFIG = {
        "num_candidates": 10,
        "slate_size": 2,
        "resample_documents": True,
        "seed": 0,
        "convert_to_discrete_action_space": False,
    }
    env_config = DEFAULT_ENV_CONFIG.copy()
    env_config.update(config)
    env = interest_evolution.create_environment(env_config)
    # Apply the fix-up wrappers defined above.
    env = RecSimResetWrapper(env)
    env = RecSimObservationSpaceWrapper(env)
    if env_config and env_config["convert_to_discrete_action_space"]:
        env = MultiDiscreteToDiscreteActionWrapper(env)
    return env


env_name = "RecSim-v1"
register_env(name=env_name, env_creator=make_recsim_env)
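

# A minimal usage sketch (not part of the original module; assumes `recsim`
# and `ray[rllib]` are installed). It builds the wrapped env directly via
# `make_recsim_env` and runs a few random steps:
if __name__ == "__main__":
    demo_env = make_recsim_env({"convert_to_discrete_action_space": True})
    obs = demo_env.reset()
    for _ in range(3):
        action = demo_env.action_space.sample()
        obs, reward, done, info = demo_env.step(action)
        print(f"reward={reward}, done={done}")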