Mirror of https://github.com/vale981/ray (synced 2025-03-06 02:21:39 -05:00)

* Remove all __future__ imports from RLlib.
* Remove (object) again from tf_run_builder.py::TFRunBuilder.
* Fix 2x LINT warnings.
* Fix broken appo_policy import (must be appo_tf_policy).
* Remove future imports from all other ray files (not just RLlib).
* Remove future import blocks that contain `unicode_literals` as well. Revert appo_tf_policy.py to appo_policy.py (belongs to another PR).
* Add two empty lines before Schedule class.
* Put back __future__ imports into determine_tests_to_run.py. Fails otherwise on a py2/print related error.
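For orientation, the kind of cleanup the first two items above describe looks roughly like the following. This is a hypothetical before/after sketch, not the actual diff to tf_run_builder.py: the Python 2 compatibility imports are dropped and the redundant (object) base class is removed.

# Hypothetical sketch of the cleanup described in the commit message above.
# Before (Python 2/3 compatible style):
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
#
# class TFRunBuilder(object):
#     ...

# After (Python 3 only):
class TFRunBuilder:
    ...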
44 lines
1.2 KiB
Python
"""Example of a custom training workflow. Run this for a demo.
|
|
|
|
This example shows:
|
|
- using Tune trainable functions to implement custom training workflows
|
|
|
|
You can visualize experiment results in ~/ray_results using TensorBoard.
|
|
"""
|
|
|
|
import ray
|
|
from ray import tune
|
|
from ray.rllib.agents.ppo import PPOTrainer
|
|
|
|
|
|
def my_train_fn(config, reporter):
|
|
# Train for 100 iterations with high LR
|
|
agent1 = PPOTrainer(env="CartPole-v0", config=config)
|
|
for _ in range(10):
|
|
result = agent1.train()
|
|
result["phase"] = 1
|
|
reporter(**result)
|
|
phase1_time = result["timesteps_total"]
|
|
state = agent1.save()
|
|
agent1.stop()
|
|
|
|
# Train for 100 iterations with low LR
|
|
config["lr"] = 0.0001
|
|
agent2 = PPOTrainer(env="CartPole-v0", config=config)
|
|
agent2.restore(state)
|
|
for _ in range(10):
|
|
result = agent2.train()
|
|
result["phase"] = 2
|
|
result["timesteps_total"] += phase1_time # keep time moving forward
|
|
reporter(**result)
|
|
agent2.stop()
|
|
|
|
|
|
if __name__ == "__main__":
|
|
ray.init()
|
|
config = {
|
|
"lr": 0.01,
|
|
"num_workers": 0,
|
|
}
|
|
resources = PPOTrainer.default_resource_request(config).to_json()
|
|
tune.run(my_train_fn, resources_per_trial=resources, config=config)
|
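Beyond the TensorBoard output mentioned in the docstring (typically browsed with tensorboard --logdir ~/ray_results), the value returned by tune.run() can also be inspected in code. The following is a minimal sketch, not part of the original example: it reuses the names from the __main__ block above and assumes a Ray release on which tune.run() returns an ExperimentAnalysis object (this return type has varied across versions).

# Sketch only: assumes tune.run() returns an ExperimentAnalysis object on the
# installed Ray version. dataframe() yields one row per trial, built from the
# trial's last reported result, so the "phase" column should read 2 here.
analysis = tune.run(my_train_fn, resources_per_trial=resources, config=config)
df = analysis.dataframe()
print(df[["phase", "timesteps_total", "episode_reward_mean"]])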