# To generate training data, first run:
# $ ./train.py --run=PPO --env=CartPole-v0 \
#   --stop='{"timesteps_total": 50000}' \
#   --config='{"output": "dataset", "output_config": {"format": "json", "path": "/tmp/out", "max_num_samples_per_file": 1}, "batch_mode": "complete_episodes"}'
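# (Usage sketch, assuming the same train.py CLI accepts a tuned-example file:
# once /tmp/out contains the JSON episodes written by the PPO command above,
# this BC config can then be launched with e.g. `./train.py -f cartpole-bc.yaml`.)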
cartpole-bc:
    env: CartPole-v0
    run: BC
    stop:
        timesteps_total: 500000
    config:
        # Works for both torch and tf.
        framework: tf
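        # (To train with PyTorch instead, this can simply be switched to "torch".)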
        # In order to evaluate on an actual environment, use the following
        # settings:
        evaluation_num_workers: 1
        evaluation_interval: 1
        evaluation_config:
            input: sampler
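        # ("input: sampler" makes the evaluation workers collect fresh rollouts
        # from the live CartPole-v0 env, so the reported evaluation reward
        # reflects actual environment performance rather than the offline data.)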
        # The historic (offline) data file from the PPO run (at the top).
        input: dataset
        input_config:
            format: json
            path: /tmp/out
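        # (Note: "format" and "path" mirror the "output_config" of the PPO
        # data-generation command at the top, so BC reads exactly the JSON
        # episodes that run produced.)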