[RLlib] Fix deprecation warning for torch_ops.py (soft-replaced by torch_utils.py). (#19982)

Author: Sven Mika, 2021-11-03 10:00:46 +01:00 (committed by GitHub)
Parent commit: 28d4cfb039
Commit: cf21c634a3
38 changed files with 39 additions and 39 deletions
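
For context: a "soft replacement" keeps the old module importable while pointing users at the new one. Below is a minimal, hypothetical sketch of such a shim (illustrative only, not RLlib's actual torch_ops.py source): it warns on import, then re-exports everything from the new location.

# Hypothetical soft-deprecation shim for ray/rllib/utils/torch_ops.py.
# Warns once on import, then forwards all names to the new module so
# existing "from ray.rllib.utils.torch_ops import ..." code keeps working.
import warnings

warnings.warn(
    "ray.rllib.utils.torch_ops is deprecated; use "
    "ray.rllib.utils.torch_utils instead.",
    DeprecationWarning,
    stacklevel=2,
)

from ray.rllib.utils.torch_utils import *  # noqa: E402,F401,F403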

@@ -15,7 +15,7 @@ from ray.rllib.policy.torch_policy import LearningRateSchedule, \
     EntropyCoeffSchedule
 from ray.rllib.utils.deprecation import Deprecated
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import apply_grad_clipping, sequence_mask
+from ray.rllib.utils.torch_utils import apply_grad_clipping, sequence_mask
 from ray.rllib.utils.typing import TrainerConfigDict, TensorType, \
     PolicyID, LocalOptimizer

@@ -18,7 +18,7 @@ from ray.rllib.env.env_context import EnvContext
 from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
 from ray.rllib.utils.annotations import override
 from ray.rllib.utils.deprecation import Deprecated
-from ray.rllib.utils.torch_ops import set_torch_seed
+from ray.rllib.utils.torch_utils import set_torch_seed
 from ray.rllib.utils import FilterManager
 logger = logging.getLogger(__name__)

@@ -22,7 +22,7 @@ from ray.rllib.utils.framework import try_import_torch
 from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY
 from ray.rllib.utils.typing import LocalOptimizer, TensorType, \
     TrainerConfigDict
-from ray.rllib.utils.torch_ops import apply_grad_clipping, \
+from ray.rllib.utils.torch_utils import apply_grad_clipping, \
     convert_to_torch_tensor, concat_multi_gpu_td_errors
 torch, nn = try_import_torch()

@@ -17,7 +17,7 @@ from ray.rllib.policy.policy_template import build_policy_class
 from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.utils.framework import try_import_torch
 from ray.rllib.utils.spaces.simplex import Simplex
-from ray.rllib.utils.torch_ops import apply_grad_clipping, \
+from ray.rllib.utils.torch_utils import apply_grad_clipping, \
     concat_multi_gpu_td_errors, huber_loss, l2_loss
 from ray.rllib.utils.typing import TrainerConfigDict, TensorType, \
     LocalOptimizer, GradInfoDict

@@ -14,7 +14,7 @@ from ray.rllib.utils.framework import try_import_tf, try_import_torch
 from ray.rllib.utils.numpy import fc, huber_loss, l2_loss, relu, sigmoid
 from ray.rllib.utils.test_utils import check, check_compute_single_action, \
     check_train_results, framework_iterator
-from ray.rllib.utils.torch_ops import convert_to_torch_tensor
+from ray.rllib.utils.torch_utils import convert_to_torch_tensor
 tf1, tf, tfv = try_import_tf()
 torch, _ = try_import_torch()

@@ -19,7 +19,7 @@ from ray.rllib.policy.torch_policy import LearningRateSchedule
 from ray.rllib.utils.error import UnsupportedSpaceException
 from ray.rllib.utils.exploration.parameter_noise import ParameterNoise
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import apply_grad_clipping, \
+from ray.rllib.utils.torch_utils import apply_grad_clipping, \
     concat_multi_gpu_td_errors, FLOAT_MIN, huber_loss, \
     reduce_mean_ignore_inf, softmax_cross_entropy_with_logits
 from ray.rllib.utils.typing import TensorType, TrainerConfigDict

@@ -19,7 +19,7 @@ from ray.rllib.policy.policy_template import build_policy_class
 from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.policy.torch_policy import LearningRateSchedule
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import apply_grad_clipping, \
+from ray.rllib.utils.torch_utils import apply_grad_clipping, \
     concat_multi_gpu_td_errors, FLOAT_MIN, huber_loss, sequence_mask
 from ray.rllib.utils.typing import TensorType, TrainerConfigDict

@@ -16,7 +16,7 @@ from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.policy.torch_policy import TorchPolicy
 from ray.rllib.utils.annotations import override
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import concat_multi_gpu_td_errors, huber_loss
+from ray.rllib.utils.torch_utils import concat_multi_gpu_td_errors, huber_loss
 from ray.rllib.utils.typing import TensorType, TrainerConfigDict
 torch, nn = try_import_torch()

@@ -11,7 +11,7 @@ from ray.rllib.policy.policy import Policy
 from ray.rllib.policy.policy_template import build_policy_class
 from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import apply_grad_clipping
+from ray.rllib.utils.torch_utils import apply_grad_clipping
 from ray.rllib.utils.typing import AgentID
 torch, nn = try_import_torch()

@@ -16,7 +16,7 @@ from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
 from ray.rllib.utils import FilterManager
 from ray.rllib.utils.annotations import override
 from ray.rllib.utils.deprecation import Deprecated
-from ray.rllib.utils.torch_ops import set_torch_seed
+from ray.rllib.utils.torch_utils import set_torch_seed
 logger = logging.getLogger(__name__)

@@ -13,7 +13,7 @@ from ray.rllib.utils.filter import get_filter
 from ray.rllib.utils.framework import try_import_torch
 from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space, \
     unbatch
-from ray.rllib.utils.torch_ops import convert_to_torch_tensor
+from ray.rllib.utils.torch_utils import convert_to_torch_tensor
 torch, _ = try_import_torch()

@@ -34,7 +34,7 @@ from ray.rllib.agents.impala.vtrace_tf import VTraceFromLogitsReturns, \
 from ray.rllib.models.torch.torch_action_dist import TorchCategorical
 from ray.rllib.utils import force_list
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import convert_to_torch_tensor
+from ray.rllib.utils.torch_utils import convert_to_torch_tensor
 torch, nn = try_import_torch()

@@ -12,7 +12,7 @@ from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.policy.torch_policy import LearningRateSchedule, \
     EntropyCoeffSchedule
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import apply_grad_clipping, \
+from ray.rllib.utils.torch_utils import apply_grad_clipping, \
     explained_variance, global_norm, sequence_mask
 torch, nn = try_import_torch()

@@ -8,7 +8,7 @@ from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.agents.ppo.ppo_tf_policy import setup_config
 from ray.rllib.agents.ppo.ppo_torch_policy import vf_preds_fetches, \
     ValueNetworkMixin
-from ray.rllib.utils.torch_ops import apply_grad_clipping
+from ray.rllib.utils.torch_utils import apply_grad_clipping
 from ray.rllib.utils.framework import try_import_torch
 torch, nn = try_import_torch()

@@ -8,7 +8,7 @@ from ray.rllib.evaluation.postprocessing import Postprocessing
 from ray.rllib.policy.policy_template import build_policy_class
 from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import apply_grad_clipping, explained_variance
+from ray.rllib.utils.torch_utils import apply_grad_clipping, explained_variance
 from ray.rllib.utils.typing import TrainerConfigDict, TensorType
 from ray.rllib.policy.policy import Policy
 from ray.rllib.models.action_dist import ActionDistribution

@@ -33,7 +33,7 @@ from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch
 from ray.rllib.utils.deprecation import DEPRECATED_VALUE
 from ray.rllib.utils.metrics.learner_info import LEARNER_INFO
 from ray.rllib.utils.sgd import standardized
-from ray.rllib.utils.torch_ops import convert_to_torch_tensor
+from ray.rllib.utils.torch_utils import convert_to_torch_tensor
 from ray.rllib.utils.typing import EnvType, TrainerConfigDict
 from ray.util.iter import from_actors, LocalIterator

@@ -16,7 +16,7 @@ from ray.rllib.policy.policy import Policy
 from ray.rllib.policy.policy_template import build_policy_class
 from ray.rllib.utils.error import UnsupportedSpaceException
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import apply_grad_clipping
+from ray.rllib.utils.torch_utils import apply_grad_clipping
 from ray.rllib.utils.typing import TrainerConfigDict
 torch, nn = try_import_torch()

@@ -7,7 +7,7 @@ from ray.rllib.evaluation.rollout_worker import get_global_worker
 from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER
 from ray.rllib.utils.typing import SampleBatchType
-from ray.rllib.utils.torch_ops import convert_to_torch_tensor
+from ray.rllib.utils.torch_utils import convert_to_torch_tensor
 torch, nn = try_import_torch()

@@ -28,8 +28,8 @@ from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.policy.torch_policy import EntropyCoeffSchedule, \
     LearningRateSchedule
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import apply_grad_clipping, explained_variance,\
-    global_norm, sequence_mask
+from ray.rllib.utils.torch_utils import apply_grad_clipping, \
+    explained_variance, global_norm, sequence_mask
 from ray.rllib.utils.typing import TensorType, TrainerConfigDict
 torch, nn = try_import_torch()

@@ -17,7 +17,7 @@ from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.policy.torch_policy import EntropyCoeffSchedule, \
     LearningRateSchedule
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import apply_grad_clipping, \
+from ray.rllib.utils.torch_utils import apply_grad_clipping, \
     explained_variance, sequence_mask
 from ray.rllib.utils.typing import TensorType, TrainerConfigDict

@@ -13,7 +13,7 @@ from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper
 from ray.rllib.policy.policy import Policy
 from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import huber_loss, sequence_mask
+from ray.rllib.utils.torch_utils import huber_loss, sequence_mask
 from ray.rllib.utils.typing import \
     ModelInputDict, TensorType, TrainerConfigDict

@@ -25,7 +25,7 @@ from ray.rllib.policy.torch_policy import TorchPolicy
 from ray.rllib.utils.annotations import override
 from ray.rllib.utils.framework import try_import_torch
 from ray.rllib.utils.spaces.simplex import Simplex
-from ray.rllib.utils.torch_ops import apply_grad_clipping, \
+from ray.rllib.utils.torch_utils import apply_grad_clipping, \
     concat_multi_gpu_td_errors, huber_loss
 from ray.rllib.utils.typing import LocalOptimizer, ModelInputDict, \
     TensorType, TrainerConfigDict

@@ -22,7 +22,7 @@ from ray.rllib.utils.numpy import fc, huber_loss, relu
 from ray.rllib.utils.spaces.simplex import Simplex
 from ray.rllib.utils.test_utils import check, check_compute_single_action, \
     check_train_results, framework_iterator
-from ray.rllib.utils.torch_ops import convert_to_torch_tensor
+from ray.rllib.utils.torch_utils import convert_to_torch_tensor
 from ray import tune
 tf1, tf, tfv = try_import_tf()

@@ -39,7 +39,7 @@ from ray.rllib.policy.torch_policy import LearningRateSchedule as TorchLR, \
 from ray.rllib.utils.framework import try_import_tf, try_import_torch
 from ray.rllib.utils.test_utils import check_learning_achieved
 from ray.rllib.utils.tf_utils import explained_variance, make_tf_callable
-from ray.rllib.utils.torch_ops import convert_to_torch_tensor
+from ray.rllib.utils.torch_utils import convert_to_torch_tensor
 tf1, tf, tfv = try_import_tf()
 torch, nn = try_import_torch()

@@ -5,7 +5,7 @@ from ray.rllib.models.tf.tf_modelv2 import TFModelV2
 from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
 from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
 from ray.rllib.utils.framework import try_import_tf, try_import_torch
-from ray.rllib.utils.torch_ops import FLOAT_MIN
+from ray.rllib.utils.torch_utils import FLOAT_MIN
 tf1, tf, tfv = try_import_tf()
 torch, nn = try_import_torch()

@@ -7,7 +7,7 @@ from ray.rllib.agents.dqn.dqn_torch_model import \
 from ray.rllib.models.tf.fcnet import FullyConnectedNetwork
 from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
 from ray.rllib.utils.framework import try_import_tf, try_import_torch
-from ray.rllib.utils.torch_ops import FLOAT_MIN, FLOAT_MAX
+from ray.rllib.utils.torch_utils import FLOAT_MIN, FLOAT_MAX
 tf1, tf, tfv = try_import_tf()
 torch, nn = try_import_torch()

@@ -4,7 +4,7 @@ from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
 from ray.rllib.policy.view_requirement import ViewRequirement
 from ray.rllib.utils.framework import try_import_tf, try_import_torch
 from ray.rllib.utils.tf_utils import one_hot
-from ray.rllib.utils.torch_ops import one_hot as torch_one_hot
+from ray.rllib.utils.torch_utils import one_hot as torch_one_hot
 tf1, tf, tfv = try_import_tf()
 torch, nn = try_import_torch()

@@ -23,7 +23,7 @@ from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.policy.view_requirement import ViewRequirement
 from ray.rllib.utils.annotations import override
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import one_hot
+from ray.rllib.utils.torch_utils import one_hot
 from ray.rllib.utils.typing import ModelConfigDict, TensorType, List
 torch, nn = try_import_torch()

@@ -14,7 +14,7 @@ from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.utils.annotations import override
 from ray.rllib.utils.framework import try_import_torch
 from ray.rllib.utils.spaces.space_utils import flatten_space
-from ray.rllib.utils.torch_ops import one_hot
+from ray.rllib.utils.torch_utils import one_hot
 torch, nn = try_import_torch()

@@ -5,7 +5,7 @@
 """
 from ray.rllib.utils.framework import try_import_torch
 from ray.rllib.models.torch.misc import SlimFC
-from ray.rllib.utils.torch_ops import sequence_mask
+from ray.rllib.utils.torch_utils import sequence_mask
 from ray.rllib.utils.framework import TensorType
 torch, nn = try_import_torch()

@@ -2,7 +2,7 @@ from typing import Union
 from ray.rllib.utils.framework import try_import_torch
 from ray.rllib.models.torch.misc import SlimFC
-from ray.rllib.utils.torch_ops import sequence_mask
+from ray.rllib.utils.torch_utils import sequence_mask
 from ray.rllib.utils.typing import TensorType
 torch, nn = try_import_torch()

@@ -11,7 +11,7 @@ from ray.rllib.policy.sample_batch import SampleBatch
 from ray.rllib.policy.view_requirement import ViewRequirement
 from ray.rllib.utils.annotations import override, DeveloperAPI
 from ray.rllib.utils.framework import try_import_torch
-from ray.rllib.utils.torch_ops import one_hot
+from ray.rllib.utils.torch_utils import one_hot
 from ray.rllib.utils.typing import ModelConfigDict, TensorType
 torch, nn = try_import_torch()

@@ -28,7 +28,7 @@ def get_activation_fn(name: Optional[str] = None, framework: str = "tf"):
     if name in ["linear", None]:
         return None
     if name == "swish":
-        from ray.rllib.utils.torch_ops import Swish
+        from ray.rllib.utils.torch_utils import Swish
         return Swish
     _, nn = try_import_torch()
     if name == "relu":

@@ -14,7 +14,7 @@ from ray.rllib.utils import add_mixins, force_list, NullContextManager
 from ray.rllib.utils.annotations import override, DeveloperAPI
 from ray.rllib.utils.framework import try_import_torch, try_import_jax
 from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY
-from ray.rllib.utils.torch_ops import convert_to_non_torch_type
+from ray.rllib.utils.torch_utils import convert_to_non_torch_type
 from ray.rllib.utils.typing import ModelGradients, TensorType, \
     TrainerConfigDict

@@ -26,7 +26,7 @@ from ray.rllib.utils.numpy import convert_to_numpy
 from ray.rllib.utils.schedules import PiecewiseSchedule
 from ray.rllib.utils.spaces.space_utils import normalize_action
 from ray.rllib.utils.threading import with_lock
-from ray.rllib.utils.torch_ops import convert_to_torch_tensor
+from ray.rllib.utils.torch_utils import convert_to_torch_tensor
 from ray.rllib.utils.typing import ModelGradients, ModelWeights, TensorType, \
     TensorStructType, TrainerConfigDict

@@ -18,7 +18,7 @@ from ray.rllib.utils.framework import try_import_tf, \
     try_import_torch
 from ray.rllib.utils.from_config import from_config
 from ray.rllib.utils.tf_utils import get_placeholder, one_hot as tf_one_hot
-from ray.rllib.utils.torch_ops import one_hot
+from ray.rllib.utils.torch_utils import one_hot
 from ray.rllib.utils.typing import FromConfigSpec, ModelConfigDict, TensorType
 tf1, tf, tfv = try_import_tf()

@@ -13,7 +13,7 @@ from ray.rllib.utils.framework import try_import_tf, try_import_torch, \
 from ray.rllib.utils.from_config import from_config
 from ray.rllib.utils.numpy import convert_to_numpy
 from ray.rllib.utils.schedules import Schedule, PiecewiseSchedule
-from ray.rllib.utils.torch_ops import FLOAT_MIN
+from ray.rllib.utils.torch_utils import FLOAT_MIN
 tf1, tf, tfv = try_import_tf()
 torch, _ = try_import_torch()

@@ -280,7 +280,7 @@ def get_activation_fn(name: Optional[str] = None, framework: str = "tf"):
     if name in ["linear", None]:
         return None
     if name in ["swish", "silu"]:
-        from ray.rllib.utils.torch_ops import Swish
+        from ray.rllib.utils.torch_utils import Swish
         return Swish
     _, nn = try_import_torch()
     if name == "relu":