# ray/rllib/agents/ppo/ppo.py
"""Backward-compatibility shim for the old ``ray.rllib.agents.ppo`` import path.

Re-exports the PPO policies, config, and trainer class (as ``PPOTrainer``)
from their new home in ``ray.rllib.algorithms.ppo``.
"""
from ray.rllib.algorithms.ppo import ( # noqa
ppo_tf_policy,
ppo_torch_policy,
PPO as PPOTrainer,
DEFAULT_CONFIG,
)