
* Halfway done with transferring MAML to new Ray
* MAML Beta Out
* Debugging MAML atm
* Distributed Execution
* Pendulum Mass Working
* All experiments complete
* Cleaned up codebase
* Travis CI
* Tests
* Merged conflicts
* Fixed variance bug conflict
* Comment resolved
* Apply suggestions from code review fixed test_maml
* Update rllib/agents/maml/tests/test_maml.py
* asdf
* Fix testing

Co-authored-by: Sven Mika <sven@anyscale.io>
28 lines
731 B
Python
import numpy as np
import gym
from gym.envs.classic_control.pendulum import PendulumEnv


class PendulumMassEnv(PendulumEnv, gym.utils.EzPickle):
    """PendulumMassEnv varies the mass of the pendulum.

    Tasks are defined as masses uniformly sampled from [0.5, 2].
    """

    def sample_tasks(self, n_tasks):
        # Each task's mass is a random float between 0.5 and 2.
        return np.random.uniform(low=0.5, high=2.0, size=(n_tasks, ))

    def set_task(self, task):
        """
        Args:
            task: task of the meta-learning environment (the pendulum mass).
        """
        self.m = task

    def get_task(self):
        """
        Returns:
            task: task of the meta-learning environment (the pendulum mass).
        """
        return self.m
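A minimal usage sketch (not part of the file above) showing how the task API of PendulumMassEnv could be exercised the way a MAML-style meta-learning loop would: sample a batch of tasks, assign one to the env, and step it. It assumes the env is importable from this module and uses the classic 4-tuple Gym step API that this file targets; the surrounding training loop is omitted.

# Hypothetical usage sketch, not part of the original file.
import numpy as np

env = PendulumMassEnv()
tasks = env.sample_tasks(n_tasks=5)      # five masses drawn from U(0.5, 2.0)
for task in tasks:
    env.set_task(task)                   # overwrite the pendulum mass for this task
    assert np.isclose(env.get_task(), task)
    obs = env.reset()
    # Apply a zero-torque action for one step under the sampled mass.
    obs, reward, done, info = env.step(np.array([0.0]))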