import unittest

import ray
from ray import tune
from ray.rllib.examples.env.stateless_cartpole import StatelessCartPole
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.tf.attention_net import GTrXLNet


class TestAttentionNetLearning(unittest.TestCase):
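    """Checks that a GTrXL attention net can learn StatelessCartPole.

    StatelessCartPole masks out the velocity components of CartPole's
    observation, so the policy needs some form of memory (here: the
    attention net) to solve the task.
    """

    # Base config shared by the PPO and IMPALA tests below.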
    config = {
        "env": StatelessCartPole,
        "gamma": 0.99,
        "num_envs_per_worker": 20,
        "framework": "tf",
    }
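
    # Stop as soon as the task is solved (mean episode reward >= 150.0)
    # or once a hard budget of 5M sampled timesteps is exhausted.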
    stop = {
        "episode_reward_mean": 150.0,
        "timesteps_total": 5000000,
    }

    @classmethod
    def setUpClass(cls) -> None:
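        # 5 CPUs leave headroom for the local worker plus the sampling
        # workers the tests below may request.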
        ray.init(num_cpus=5)

    @classmethod
    def tearDownClass(cls) -> None:
        ray.shutdown()

    def test_ppo_attention_net_learning(self):
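        # Register the GTrXL transformer under the custom-model name
        # "attention_net" so the model config below can refer to it.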
        ModelCatalog.register_custom_model("attention_net", GTrXLNet)
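        # A deliberately tiny GTrXL (one transformer unit, one attention
        # head, short memory) keeps the test cheap while still forcing the
        # net to carry information across timesteps.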
        config = dict(
            self.config, **{
                "num_workers": 0,
                "entropy_coeff": 0.001,
                "vf_loss_coeff": 1e-5,
                "num_sgd_iter": 5,
                "model": {
                    "custom_model": "attention_net",
                    "max_seq_len": 10,
                    "custom_model_config": {
                        "num_transformer_units": 1,
                        "attention_dim": 32,
                        "num_heads": 1,
                        "memory_inference": 5,
                        "memory_training": 5,
                        "head_dim": 32,
                        "position_wise_mlp_dim": 32,
                    },
                },
            })
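        # tune.run() blocks until one of the `stop` criteria above is met.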
        tune.run("PPO", config=config, stop=self.stop, verbose=1)

    # TODO: (sven) causes memory failures/timeouts on Travis.
    # Re-enable this once we have fast attention in master branch.
    def test_impala_attention_net_learning(self):
        return
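        # Original IMPALA setup, kept for reference until re-enabled: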
        # ModelCatalog.register_custom_model("attention_net", GTrXLNet)
        # config = dict(
        #     self.config, **{
        #         "num_workers": 4,
        #         "num_gpus": 0,
        #         "entropy_coeff": 0.01,
        #         "vf_loss_coeff": 0.001,
        #         "lr": 0.0008,
        #         "model": {
        #             "custom_model": "attention_net",
        #             "max_seq_len": 65,
        #             "custom_model_config": {
        #                 "num_transformer_units": 1,
        #                 "attention_dim": 64,
        #                 "num_heads": 1,
        #                 "memory_inference": 10,
        #                 "memory_training": 10,
        #                 "head_dim": 32,
        #                 "position_wise_mlp_dim": 32,
        #             },
        #         },
        #     })
        # tune.run("IMPALA", config=config, stop=self.stop, verbose=1)


if __name__ == "__main__":
    import pytest
    import sys

    sys.exit(pytest.main(["-v", __file__]))