# ray/rllib/examples/custom_keras_model.py

"""Example of using a custom ModelV2 Keras-style model."""
import argparse
import os
import ray
from ray import tune
from ray.rllib.agents.dqn.distributional_q_tf_model import \
    DistributionalQTFModel
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.visionnet import VisionNetwork as MyVisionNetwork
from ray.rllib.policy.policy import LEARNER_STATS_KEY
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.framework import try_import_tf
# try_import_tf() returns the v1-compat API module (tf1), the tf module
# itself, and the installed TensorFlow major version.
tf1, tf, tfv = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument(
    "--run",
    type=str,
    default="DQN",
    help="The RLlib-registered algorithm to use.")
parser.add_argument("--stop", type=int, default=200)
parser.add_argument("--use-vision-network", action="store_true")
parser.add_argument("--num-cpus", type=int, default=0)
class MyKerasModel(TFModelV2):
    """Custom model for policy gradient algorithms."""

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super(MyKerasModel, self).__init__(obs_space, action_space,
                                           num_outputs, model_config, name)
        self.inputs = tf.keras.layers.Input(
            shape=obs_space.shape, name="observations")
        layer_1 = tf.keras.layers.Dense(
            128,
            name="my_layer1",
            activation=tf.nn.relu,
            kernel_initializer=normc_initializer(1.0))(self.inputs)
        # Policy (logits) output head.
        layer_out = tf.keras.layers.Dense(
            num_outputs,
            name="my_out",
            activation=None,
            kernel_initializer=normc_initializer(0.01))(layer_1)
        # Value function output head, sharing the hidden layer with the
        # policy head.
        value_out = tf.keras.layers.Dense(
            1,
            name="value_out",
            activation=None,
            kernel_initializer=normc_initializer(0.01))(layer_1)
        self.base_model = tf.keras.Model(self.inputs, [layer_out, value_out])

    def forward(self, input_dict, state, seq_lens):
        model_out, self._value_out = self.base_model(input_dict["obs"])
        return model_out, state

    def value_function(self):
        return tf.reshape(self._value_out, [-1])

    def metrics(self):
        # Custom metric reported under the learner stats (checked below).
        return {"foo": tf.constant(42.0)}
class MyKerasQModel(DistributionalQTFModel):
    """Custom model for DQN."""

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name, **kw):
        super(MyKerasQModel, self).__init__(
            obs_space, action_space, num_outputs, model_config, name, **kw)

        # Define the core model layers, which will be used by the output
        # heads that DistributionalQTFModel builds on top of them.
        self.inputs = tf.keras.layers.Input(
            shape=obs_space.shape, name="observations")
        layer_1 = tf.keras.layers.Dense(
            128,
            name="my_layer1",
            activation=tf.nn.relu,
            kernel_initializer=normc_initializer(1.0))(self.inputs)
        layer_out = tf.keras.layers.Dense(
            num_outputs,
            name="my_out",
            activation=tf.nn.relu,
            kernel_initializer=normc_initializer(1.0))(layer_1)
        self.base_model = tf.keras.Model(self.inputs, layer_out)

    # Implement the core forward method.
    def forward(self, input_dict, state, seq_lens):
        model_out = self.base_model(input_dict["obs"])
        return model_out, state

    def metrics(self):
        # Same custom metric as in MyKerasModel above.
        return {"foo": tf.constant(42.0)}
if __name__ == "__main__":
    args = parser.parse_args()
    ray.init(num_cpus=args.num_cpus or None)

    # Register the custom models so they can be referenced by name in the
    # "model" config below.
    ModelCatalog.register_custom_model(
        "keras_model", MyVisionNetwork
        if args.use_vision_network else MyKerasModel)
    ModelCatalog.register_custom_model(
        "keras_q_model", MyVisionNetwork
        if args.use_vision_network else MyKerasQModel)

    # Tests https://github.com/ray-project/ray/issues/7293
    def check_has_custom_metric(result):
        r = result["result"]["info"]["learner"]
        if DEFAULT_POLICY_ID in r:
            r = r[DEFAULT_POLICY_ID].get(LEARNER_STATS_KEY,
                                         r[DEFAULT_POLICY_ID])
        assert r["model"]["foo"] == 42, result

    if args.run == "DQN":
        extra_config = {"learning_starts": 0}
    else:
        extra_config = {}
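    # Note: "learning_starts": 0 above makes DQN run a learner update in the
    # very first training iteration; otherwise the learner stats (and the
    # custom "foo" metric asserted by the callback) could still be empty when
    # check_has_custom_metric first runs.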
    tune.run(
        args.run,
        stop={"episode_reward_mean": args.stop},
        config=dict(
            extra_config,
            **{
                "env": "BreakoutNoFrameskip-v4"
                if args.use_vision_network else "CartPole-v0",
                # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
                "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
                "callbacks": {
                    "on_train_result": check_has_custom_metric,
                },
                "model": {
                    "custom_model": "keras_q_model"
                    if args.run == "DQN" else "keras_model"
                },
                "framework": "tf",
            }))