ray/rllib/agents/pg/default_config.py

from ray.rllib.agents.trainer import with_common_config
# fmt: off
# __sphinx_doc_begin__
# Adds the following (PG-specific) updates to the (base) `Trainer` config in
# rllib/agents/trainer.py (`COMMON_CONFIG` dict).
DEFAULT_CONFIG = with_common_config({
    # No remote workers by default.
    "num_workers": 0,
    # Learning rate.
    "lr": 0.0004,
    # Experimental: By default, switch off preprocessors for PG.
    "_disable_preprocessor_api": True,
    # Use new `training_iteration` API (instead of `execution_plan` method).
    "_disable_execution_plan_api": True,
})
# __sphinx_doc_end__
# fmt: on
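
# --- Usage sketch (illustrative, not part of the upstream module) ------------
# A minimal sketch of how this DEFAULT_CONFIG is typically consumed, assuming
# the Ray 1.x trainer API (`ray.rllib.agents.pg.PGTrainer`). The overridden
# values and the environment name below are arbitrary examples; adjust them
# for your setup and Ray version.
#
#     import ray
#     from ray.rllib.agents.pg import PGTrainer, DEFAULT_CONFIG
#
#     config = DEFAULT_CONFIG.copy()
#     config["lr"] = 0.001        # override the PG default learning rate
#     config["num_workers"] = 2   # use two remote rollout workers instead of 0
#
#     ray.init()
#     trainer = PGTrainer(config=config, env="CartPole-v0")
#     print(trainer.train())      # run one training iteration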