Mirror of https://github.com/vale981/ray, synced 2025-03-06 02:21:39 -05:00
[Lint] Cleanup incorrectly formatted strings (Part 3: components) (#23130)
commit 6d83a3f283 (parent e3987d85c3)
45 changed files with 92 additions and 104 deletions
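Why these changes are behavior-preserving: Python implicitly concatenates adjacent string literals at parse time, so each pair of split literals removed below already produced a single string; merging them is purely cosmetic. A minimal sketch of the pattern (illustrative values, not taken from the diff):

    # Adjacent literals are joined by the parser before the method call runs,
    # so both forms build the identical string.
    a = "calling create_namespaced_pod " "(count={}).".format(3)
    b = "calling create_namespaced_pod (count={}).".format(3)
    assert a == b == "calling create_namespaced_pod (count=3)."

    # The same feature is a classic bug source when a comma is forgotten:
    flags = [
        "--head"
        "--no-monitor",  # silently becomes the single string "--head--no-monitor"
    ]
    assert flags == ["--head--no-monitor"]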
@@ -142,7 +142,7 @@ class KubernetesNodeProvider(NodeProvider):
         pod_spec["metadata"]["labels"].update(head_selector)

         logger.info(
-            log_prefix + "calling create_namespaced_pod " "(count={}).".format(count)
+            log_prefix + "calling create_namespaced_pod (count={}).".format(count)
         )
         new_nodes = []
         for _ in range(count):

@@ -287,7 +287,7 @@ class StandardAutoscaler:
             self._update()
         except Exception as e:
             self.prom_metrics.update_loop_exceptions.inc()
-            logger.exception("StandardAutoscaler: " "Error during autoscaling.")
+            logger.exception("StandardAutoscaler: Error during autoscaling.")
             # Don't abort the autoscaler if the K8s API server is down.
             # https://github.com/ray-project/ray/issues/12255
             is_k8s_connection_error = self.config["provider"][
@@ -296,7 +296,7 @@ class StandardAutoscaler:
             if not is_k8s_connection_error:
                 self.num_failures += 1
             if self.num_failures > self.max_failures:
-                logger.critical("StandardAutoscaler: " "Too many errors, abort.")
+                logger.critical("StandardAutoscaler: Too many errors, abort.")
                 raise e

     def _update(self):
@@ -969,7 +969,7 @@ class StandardAutoscaler:
             if errors_fatal:
                 raise e
             else:
-                logger.exception("StandardAutoscaler: " "Error parsing config.")
+                logger.exception("StandardAutoscaler: Error parsing config.")

     def launch_config_ok(self, node_id):
         if self.disable_launch_config_check:

@@ -579,7 +579,7 @@ class CloudwatchHelper:
         try:
             response = self.ec2_client.describe_instances(InstanceIds=[self.node_id])
             reservations = response["Reservations"]
-            message = "More than 1 response received from " "describing current node"
+            message = "More than 1 response received from describing current node"
             assert len(reservations) == 1, message
             instances = reservations[0]["Instances"]
             assert len(reservations) == 1, message
@@ -607,7 +607,7 @@ class CloudwatchHelper:
         the Unified CloudWatch Agent installed.
         """
         logger.info(
-            "Checking Unified Cloudwatch Agent " "status on node {}".format(node_id)
+            "Checking Unified Cloudwatch Agent status on node {}".format(node_id)
         )
         parameters_status_cwa = {
             "action": ["status"],
@@ -651,7 +651,7 @@ class CloudwatchHelper:
         try:
             response = self.ec2_client.describe_instances(InstanceIds=[self.node_id])
             reservations = response["Reservations"]
-            message = "More than 1 response received from " "describing current node"
+            message = "More than 1 response received from describing current node"
             assert len(reservations) == 1, message
             instances = reservations[0]["Instances"]
             assert len(instances) == 1, message

@@ -290,7 +290,7 @@ def _configure_iam_role(config):
     role = _get_role(role_name, config)
     if role is None:
         cli_logger.verbose(
-            "Creating new IAM role {} for " "use as the default instance role.",
+            "Creating new IAM role {} for use as the default instance role.",
             cf.bold(role_name),
         )
         iam = _resource("iam", config)

@@ -41,7 +41,7 @@ def handle_boto_error(exc, msg, *args, **kwargs):
     error_code = error_info.get("Code", None)

     generic_message_args = [
-        "{}\n" "Error code: {}",
+        "{}\nError code: {}",
         msg.format(*args, **kwargs),
         cf.bold(error_code),
     ]
@@ -94,7 +94,7 @@ def handle_boto_error(exc, msg, *args, **kwargs):
     # fixme: replace with a Github URL that points
     # to our repo
     aws_session_script_url = (
-        "https://gist.github.com/maximsmol/" "a0284e1d97b25d417bd9ae02e5f450cf"
+        "https://gist.github.com/maximsmol/a0284e1d97b25d417bd9ae02e5f450cf"
     )

     cli_logger.verbose_error(*generic_message_args)

@@ -703,7 +703,7 @@ class _CliLogger:

         indent = " " * msg_len
         self.error(
-            "{}Invalid answer: {}. " "Expected {} or {}",
+            "{}Invalid answer: {}. Expected {} or {}",
             indent,
             cf.bold(ans.strip()),
             self.render_list(yes_answers, "/"),
@@ -792,7 +792,7 @@ CLICK_LOGGING_OPTIONS = [
         required=False,
         type=click.Choice(["auto", "false", "true"], case_sensitive=False),
         default="auto",
-        help=("Use color logging. " "Auto enables color logging if stdout is a TTY."),
+        help=("Use color logging. Auto enables color logging if stdout is a TTY."),
     ),
     click.option("-v", "--verbose", default=None, count=True),
 ]

@@ -198,12 +198,12 @@ class KubernetesCommandRunner(CommandRunnerInterface):
         if options.get("rsync_exclude"):
             if log_once("autoscaler_k8s_rsync_exclude"):
                 logger.warning(
-                    "'rsync_exclude' detected but is currently " "unsupported for k8s."
+                    "'rsync_exclude' detected but is currently unsupported for k8s."
                 )
         if options.get("rsync_filter"):
             if log_once("autoscaler_k8s_rsync_filter"):
                 logger.warning(
-                    "'rsync_filter' detected but is currently " "unsupported for k8s."
+                    "'rsync_filter' detected but is currently unsupported for k8s."
                 )
         if target.startswith("~"):
             target = self._home + target[1:]

@@ -314,7 +314,7 @@ def _bootstrap_config(

     if log_once("_printed_cached_config_warning"):
         cli_logger.verbose_warning(
-            "Loaded cached provider configuration " "from " + cf.bold("{}"),
+            "Loaded cached provider configuration from " + cf.bold("{}"),
             cache_key,
         )
         if cli_logger.verbosity == 0:
@@ -646,7 +646,7 @@ def get_or_create_head_node(

     if not head_node:
         cli_logger.confirm(
-            yes, "No head node found. " "Launching a new cluster.", _abort=True
+            yes, "No head node found. Launching a new cluster.", _abort=True
         )

     if head_node:
@@ -661,12 +661,12 @@ def get_or_create_head_node(
         )
     elif no_restart:
         cli_logger.print(
-            "Cluster Ray runtime will not be restarted due " "to `{}`.",
+            "Cluster Ray runtime will not be restarted due to `{}`.",
             cf.bold("--no-restart"),
         )
         cli_logger.confirm(
             yes,
-            "Updating cluster configuration and " "running setup commands.",
+            "Updating cluster configuration and running setup commands.",
             _abort=True,
         )
     else:
@@ -720,7 +720,7 @@ def get_or_create_head_node(
     while True:
         if time.time() - start > 50:
             cli_logger.abort(
-                "Head node fetch timed out. " "Failed to create head node."
+                "Head node fetch timed out. Failed to create head node."
             )
         nodes = provider.non_terminated_nodes(head_node_tags)
         if len(nodes) == 1:
@@ -880,7 +880,7 @@ def _should_create_new_head(
     # Warn user
     if new_head_required:
         with cli_logger.group(
-            "Currently running head node is out-of-date with cluster " "configuration"
+            "Currently running head node is out-of-date with cluster configuration"
         ):

             if hashes_mismatch:

@@ -120,7 +120,7 @@ def wait_for_compute_global_operation(project_name, operation, compute):
             raise Exception(result["error"])

         if result["status"] == "DONE":
-            logger.info("wait_for_compute_global_operation: " "Operation done.")
+            logger.info("wait_for_compute_global_operation: Operation done.")
             break

         time.sleep(POLL_INTERVAL)
@@ -445,7 +445,7 @@ def _configure_key_pair(config, compute):
         # Create a key since it doesn't exist locally or in GCP
         if not key_found and not os.path.exists(private_key_path):
             logger.info(
-                "_configure_key_pair: " "Creating new key pair {}".format(key_name)
+                "_configure_key_pair: Creating new key pair {}".format(key_name)
             )
             public_key, private_key = generate_rsa_key_pair()

@@ -480,7 +480,7 @@ def _configure_key_pair(config, compute):
     )
     assert os.path.exists(
         private_key_path
-    ), "Private key file {} not found for user {}" "".format(private_key_path, ssh_user)
+    ), "Private key file {} not found for user {}".format(private_key_path, ssh_user)

     logger.info(
         "_configure_key_pair: "
@@ -599,7 +599,7 @@ def _create_project(project_id, crm):

 def _get_service_account(account, config, iam):
     project_id = config["provider"]["project_id"]
-    full_name = "projects/{project_id}/serviceAccounts/{account}" "".format(
+    full_name = "projects/{project_id}/serviceAccounts/{account}".format(
         project_id=project_id, account=account
     )
     try:

@@ -367,7 +367,7 @@ class GCPCompute(GCPResource):
             + ")"
         )

-        cluster_name_filter_expr = "(labels.{key} = {value})" "".format(
+        cluster_name_filter_expr = "(labels.{key} = {value})".format(
             key=TAG_RAY_CLUSTER_NAME, value=self.cluster_name
         )

@@ -455,7 +455,7 @@ class GCPCompute(GCPResource):
         if not re.search(".*/machineTypes/.*", existing_machine_type):
             configuration_dict[
                 "machineType"
-            ] = "zones/{zone}/machineTypes/{machine_type}" "".format(
+            ] = "zones/{zone}/machineTypes/{machine_type}".format(
                 zone=self.availability_zone,
                 machine_type=configuration_dict["machineType"],
             )
@@ -465,7 +465,7 @@ class GCPCompute(GCPResource):
         if not re.search(".*/acceleratorTypes/.*", gpu_type):
             accelerator[
                 "acceleratorType"
-            ] = "projects/{project}/zones/{zone}/" "acceleratorTypes/{accelerator}".format(  # noqa: E501
+            ] = "projects/{project}/zones/{zone}/acceleratorTypes/{accelerator}".format(  # noqa: E501
                 project=self.project_id,
                 zone=self.availability_zone,
                 accelerator=gpu_type,

@@ -6,7 +6,7 @@ from typing import Dict
 from ray.autoscaler._private.cli_logger import cli_logger
 from ray._private.utils import get_ray_temp_dir

-unsupported_field_message = "The field {} is not supported " "for on-premise clusters."
+unsupported_field_message = "The field {} is not supported for on-premise clusters."

 LOCAL_CLUSTER_NODE_TYPE = "local.cluster.node"

@@ -47,7 +47,7 @@ class ClusterState:
             else:
                 workers = {}
             logger.info(
-                "ClusterState: " "Loaded cluster state: {}".format(list(workers))
+                "ClusterState: Loaded cluster state: {}".format(list(workers))
             )
             for worker_ip in provider_config["worker_ips"]:
                 if worker_ip not in workers:
@@ -89,7 +89,7 @@ class ClusterState:
         assert len(workers) == len(provider_config["worker_ips"]) + 1
         with open(self.save_path, "w") as f:
             logger.debug(
-                "ClusterState: " "Writing cluster state: {}".format(workers)
+                "ClusterState: Writing cluster state: {}".format(workers)
             )
             f.write(json.dumps(workers))

@@ -209,7 +209,7 @@ class Monitor:
             )
         elif not prometheus_client:
             logger.warning(
-                "`prometheus_client` not found, so metrics will " "not be exported."
+                "`prometheus_client` not found, so metrics will not be exported."
             )

         logger.info("Monitor: Started")
@@ -499,7 +499,7 @@ def log_resource_batch_data_if_desired(

 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description=("Parse Redis server for the " "monitor to connect to.")
+        description=("Parse Redis server for the monitor to connect to.")
     )
     parser.add_argument(
         "--gcs-address", required=False, type=str, help="The address (ip:port) of GCS."
@@ -548,7 +548,7 @@ if __name__ == "__main__":
         "--logs-dir",
         required=True,
         type=str,
-        help="Specify the path of the temporary directory used by Ray " "processes.",
+        help="Specify the path of the temporary directory used by Ray processes.",
     )
     parser.add_argument(
         "--logging-rotate-bytes",

@@ -156,7 +156,7 @@ try:
         )
         self.update_loop_exceptions: Counter = Counter(
             "update_loop_exceptions",
-            "Number of exceptions raised in the update loop of the " "autoscaler.",
+            "Number of exceptions raised in the update loop of the autoscaler.",
             unit="exceptions",
             namespace="autoscaler",
             registry=self.registry,

@@ -366,7 +366,7 @@ class StaroidNodeProvider(NodeProvider):
         )

         logger.info(
-            log_prefix + "calling create_namespaced_pod " "(count={}).".format(count)
+            log_prefix + "calling create_namespaced_pod (count={}).".format(count)
         )
         new_nodes = []
         for _ in range(count):

@@ -411,7 +411,7 @@ def handle_ssh_fails(e, first_conn_refused_time, retry_interval):

     if e.special_case in ["ssh_timeout", "ssh_conn_refused"]:
         cli_logger.print(
-            "SSH still not available, " "retrying in {} seconds.",
+            "SSH still not available, retrying in {} seconds.",
             cf.bold(str(retry_interval)),
         )
     else:

@@ -301,7 +301,7 @@ class NodeUpdater:
                 cmd_ = " ".join(e.cmd)
             else:
                 logger.debug(
-                    f"e.cmd type ({type(e.cmd)}) not " "list or str."
+                    f"e.cmd type ({type(e.cmd)}) not list or str."
                 )
                 cmd_ = str(e.cmd)
             retry_str = "(Exit Status {}): {}".format(
@@ -309,7 +309,7 @@ class NodeUpdater:
             )

             cli_logger.print(
-                "SSH still not available {}, " "retrying in {} seconds.",
+                "SSH still not available {}, retrying in {} seconds.",
                 cf.dimmed(retry_str),
                 cf.bold(str(READY_CHECK_INTERVAL)),
             )

@@ -156,7 +156,7 @@ def validate_config(config: Dict[str, Any]) -> None:
     if "available_node_types" in config:
         if "head_node_type" not in config:
             raise ValueError(
-                "You must specify `head_node_type` if `available_node_types " "is set."
+                "You must specify `head_node_type` if `available_node_types is set."
             )
         if config["head_node_type"] not in config["available_node_types"]:
             raise ValueError("`head_node_type` must be one of `available_node_types`.")

@@ -155,7 +155,7 @@ class NodeProvider:
             mapping from deleted node ids to node metadata.
         """
         for node_id in node_ids:
-            logger.info("NodeProvider: " "{}: Terminating node".format(node_id))
+            logger.info("NodeProvider: {}: Terminating node".format(node_id))
             self.terminate_node(node_id)
         return None

@@ -52,9 +52,7 @@ class ProgressBar:
         else:
             global needs_warning
             if needs_warning:
-                print(
-                    "[dataset]: Run `pip install tqdm` to enable " "progress reporting."
-                )
+                print("[dataset]: Run `pip install tqdm` to enable progress reporting.")
                 needs_warning = False
             self._bar = None

@@ -232,7 +232,7 @@ def test_batch_tensors(ray_start_regular_shared):
     import torch

     ds = ray.data.from_items([torch.tensor([0, 0]) for _ in range(40)])
-    res = "Dataset(num_blocks=40, num_rows=40, " "schema=<class 'torch.Tensor'>)"
+    res = "Dataset(num_blocks=40, num_rows=40, schema=<class 'torch.Tensor'>)"
     assert str(ds) == res, str(ds)
     with pytest.raises(pa.lib.ArrowInvalid):
         next(ds.iter_batches(batch_format="pyarrow"))
@@ -3044,9 +3044,7 @@ def test_sort_partition_same_key_to_same_block(ray_start_regular_shared):
 def test_column_name_type_check(ray_start_regular_shared):
     df = pd.DataFrame({"1": np.random.rand(10), "a": np.random.rand(10)})
     ds = ray.data.from_pandas(df)
-    expected_str = (
-        "Dataset(num_blocks=1, num_rows=10, " "schema={1: float64, a: float64})"
-    )
+    expected_str = "Dataset(num_blocks=1, num_rows=10, schema={1: float64, a: float64})"
     assert str(ds) == expected_str, str(ds)
     df = pd.DataFrame({1: np.random.rand(10), "a": np.random.rand(10)})
     with pytest.raises(ValueError):

@@ -142,7 +142,7 @@ def eye(dim1, dim2=-1, dtype_name="float"):
 def triu(a):
     if a.ndim != 2:
         raise Exception(
-            "Input must have 2 dimensions, but a.ndim is " "{}.".format(a.ndim)
+            "Input must have 2 dimensions, but a.ndim is {}.".format(a.ndim)
         )
     result = DistArray(a.shape)
     for (i, j) in np.ndindex(*result.num_blocks):
@@ -159,7 +159,7 @@ def triu(a):
 def tril(a):
     if a.ndim != 2:
         raise Exception(
-            "Input must have 2 dimensions, but a.ndim is " "{}.".format(a.ndim)
+            "Input must have 2 dimensions, but a.ndim is {}.".format(a.ndim)
         )
     result = DistArray(a.shape)
     for (i, j) in np.ndindex(*result.num_blocks):

@@ -25,7 +25,7 @@ def tsqr(a):
     """
     if len(a.shape) != 2:
         raise Exception(
-            "tsqr requires len(a.shape) == 2, but a.shape is " "{}".format(a.shape)
+            "tsqr requires len(a.shape) == 2, but a.shape is {}".format(a.shape)
         )
     if a.num_blocks[1] != 1:
         raise Exception(

@@ -64,9 +64,9 @@ class _PyObjScanner(ray.cloudpickle.CloudPickler):

     def find_nodes(self, obj: Any) -> List["DAGNode"]:
         """Find top-level DAGNodes."""
-        assert self._found is None, (
-            "find_nodes cannot be called twice on the same " "PyObjScanner instance."
-        )
+        assert (
+            self._found is None
+        ), "find_nodes cannot be called twice on the same PyObjScanner instance."
         self._found = []
         self.dump(obj)
         return self._found

@@ -259,7 +259,7 @@ def set_status(cluster_name: str, cluster_namespace: str, status: str) -> None:
     except ApiException as e:
         if e.status == 409:
             logger.info(
-                "Caught a 409 error while setting" " RayCluster status. Retrying..."
+                "Caught a 409 error while setting RayCluster status. Retrying..."
             )
             time.sleep(DELAY_BEFORE_STATUS_RETRY)
             continue

@@ -285,7 +285,7 @@ def debug(address):
     required=False,
     hidden=True,
     type=str,
-    help="the port to use for the Redis shards other than the " "primary Redis shard",
+    help="the port to use for the Redis shards other than the primary Redis shard",
 )
 @click.option(
     "--object-manager-port",
@@ -372,7 +372,7 @@ def debug(address):
     required=False,
     default="{}",
     type=str,
-    help="a JSON serialized dictionary mapping resource name to " "resource quantity",
+    help="a JSON serialized dictionary mapping resource name to resource quantity",
 )
 @click.option(
     "--head",
@@ -476,14 +476,14 @@ def debug(address):
     type=int,
     hidden=True,
     default=None,
-    help="the port to use to expose Ray metrics through a " "Prometheus endpoint.",
+    help="the port to use to expose Ray metrics through a Prometheus endpoint.",
 )
 @click.option(
     "--no-monitor",
     is_flag=True,
     hidden=True,
     default=False,
-    help="If True, the ray autoscaler monitor for this cluster will not be " "started.",
+    help="If True, the ray autoscaler monitor for this cluster will not be started.",
 )
 @click.option(
     "--tracing-startup-hook",
@@ -801,7 +801,7 @@ def start(
     # Ensure `--address` flag is specified.
     if address is None:
         cli_logger.abort(
-            "`{}` is a required flag unless starting a head " "node with `{}`.",
+            "`{}` is a required flag unless starting a head node with `{}`.",
             cf.bold("--address"),
             cf.bold("--head"),
         )
@@ -820,7 +820,7 @@ def start(
         if val is None:
             continue
         cli_logger.abort(
-            "`{}` should only be specified when starting head " "node with `{}`.",
+            "`{}` should only be specified when starting head node with `{}`.",
             cf.bold(flag),
             cf.bold("--head"),
         )
@@ -1138,9 +1138,9 @@ def up(
         cf.bold("--restart-only"),
         cf.bold("--no-restart"),
     )
-    assert restart_only != no_restart, (
-        "Cannot set both 'restart_only' " "and 'no_restart' at the same time!"
-    )
+    assert (
+        restart_only != no_restart
+    ), "Cannot set both 'restart_only' and 'no_restart' at the same time!"

     if urllib.parse.urlparse(cluster_config_file).scheme in ("http", "https"):
         try:
@@ -1430,7 +1430,7 @@ def submit(
     )
     cli_logger.doassert(
         not (script_args and args),
-        "`{0}` and `{1}` are incompatible. Use only `{1}`.\n" "Example: `{2}`",
+        "`{0}` and `{1}` are incompatible. Use only `{1}`.\nExample: `{2}`",
         cf.bold("--args"),
         cf.bold("-- <args ...>"),
         cf.bold("ray submit script.py -- --arg=123 --flag"),
@@ -2166,7 +2166,7 @@ def install_nightly(verbose, dryrun):
     "-gen",
     required=False,
     type=str,
-    help="The directory to generate the bazel project template to," " if provided.",
+    help="The directory to generate the bazel project template to, if provided.",
 )
 @add_click_logging_options
 def cpp(show_library_path, generate_bazel_project_template_to):

@@ -1504,7 +1504,7 @@ def get_deployment(name: str) -> Deployment:
         ) = internal_get_global_client().get_deployment_info(name)
     except KeyError:
         raise KeyError(
-            f"Deployment {name} was not found. " "Did you call Deployment.deploy()?"
+            f"Deployment {name} was not found. Did you call Deployment.deploy()?"
         )
     return Deployment(
         cloudpickle.loads(deployment_info.replica_config.serialized_deployment_def),

@@ -241,7 +241,7 @@ def batch(_func=None, max_batch_size=10, batch_wait_timeout_s=0.0):
     if _func is not None:
         if not callable(_func):
             raise TypeError(
-                "@serve.batch can only be used to " "decorate functions or methods."
+                "@serve.batch can only be used to decorate functions or methods."
             )

         if not iscoroutinefunction(_func):

@@ -1183,9 +1183,9 @@ class DeploymentState:
     def _scale_deployment_replicas(self) -> bool:
         """Scale the given deployment to the number of replicas."""

-        assert self._target_replicas >= 0, (
-            "Number of replicas must be" " greater than or equal to 0."
-        )
+        assert (
+            self._target_replicas >= 0
+        ), "Number of replicas must be greater than or equal to 0."

         replicas_stopped = self._stop_wrong_version_replicas()

@@ -225,7 +225,7 @@ class HTTPProxy:
         self.deployment_request_error_counter = metrics.Counter(
             "serve_num_deployment_http_error_requests",
             description=(
-                "The number of non-200 HTTP responses returned by each " "deployment."
+                "The number of non-200 HTTP responses returned by each deployment."
             ),
             tag_keys=("deployment",),
         )

@@ -114,7 +114,7 @@ class LongPollClient:
         if self.event_loop.is_running():
             self.event_loop.call_soon_threadsafe(callback)
         else:
-            logger.error("The event loop is closed, shutting down long poll " "client.")
+            logger.error("The event loop is closed, shutting down long poll client.")
             self.is_running = False

     def _process_update(self, updates: Dict[str, UpdatedObject]):
@@ -122,7 +122,7 @@ class LongPollClient:
             # This can happen during shutdown where the controller is
             # intentionally killed, the client should just gracefully
             # exit.
-            logger.debug("LongPollClient failed to connect to host. " "Shutting down.")
+            logger.debug("LongPollClient failed to connect to host. Shutting down.")
             self.is_running = False
             return

@@ -253,7 +253,7 @@ class RayServeReplica:
         self.request_counter = metrics.Counter(
             "serve_deployment_request_counter",
             description=(
-                "The number of queries that have been " "processed in this replica."
+                "The number of queries that have been processed in this replica."
             ),
             tag_keys=("deployment", "replica"),
         )
@@ -264,7 +264,7 @@ class RayServeReplica:
         self.error_counter = metrics.Counter(
             "serve_deployment_error_counter",
             description=(
-                "The number of exceptions that have " "occurred in this replica."
+                "The number of exceptions that have occurred in this replica."
             ),
             tag_keys=("deployment", "replica"),
         )
@@ -275,7 +275,7 @@ class RayServeReplica:
         self.restart_counter = metrics.Counter(
             "serve_deployment_replica_starts",
             description=(
-                "The number of times this replica " "has been restarted due to failure."
+                "The number of times this replica has been restarted due to failure."
             ),
             tag_keys=("deployment", "replica"),
         )

@@ -12,9 +12,7 @@ def make_kv_store(checkpoint_path, namespace):
     """Create KVStore instance based on checkpoint_path configuration"""

     if checkpoint_path == DEFAULT_CHECKPOINT_PATH:
-        logger.info(
-            "Using RayInternalKVStore for controller " "checkpoint and recovery."
-        )
+        logger.info("Using RayInternalKVStore for controller checkpoint and recovery.")
         return RayInternalKVStore(namespace)
     else:
         parsed_url = urlparse(checkpoint_path)

@@ -94,9 +94,9 @@ def test_recover_start_from_replica_actor_names(serve_instance):
         actor_handle = ray.get_actor(replica_name)
         ref = actor_handle.get_metadata.remote()
         _, version = ray.get(ref)
-        assert replica_version_hash == hash(version), (
-            "Replica version hash should be the same after " "recover from actor names"
-        )
+        assert replica_version_hash == hash(
+            version
+        ), "Replica version hash should be the same after recover from actor names"


 def test_recover_rolling_update_from_replica_actor_names(serve_instance):

@@ -48,7 +48,7 @@ logger = logging.getLogger(__name__)

 require_version(
     "datasets>=1.8.0",
-    "To fix: pip install -r examples/pytorch/text-classification/requirements" ".txt",
+    "To fix: pip install -r examples/pytorch/text-classification/requirements.txt",
 )

 task_to_keys = {
@@ -66,7 +66,7 @@ task_to_keys = {

 def parse_args():
     parser = argparse.ArgumentParser(
-        description="Finetune a transformers model on a text classification " "task"
+        description="Finetune a transformers model on a text classification task"
     )
     parser.add_argument(
         "--task_name",
@@ -132,7 +132,7 @@ def parse_args():
         "--learning_rate",
         type=float,
         default=5e-5,
-        help="Initial learning rate (after the potential warmup period) to " "use.",
+        help="Initial learning rate (after the potential warmup period) to use.",
     )
     parser.add_argument(
         "--weight_decay", type=float, default=0.0, help="Weight decay to use."

@@ -210,9 +210,7 @@ class WorkerGroup:
             # Wait for actors to die gracefully.
             done, not_done = ray.wait(done_refs, timeout=patience_s)
             if not_done:
-                logger.debug(
-                    "Graceful termination failed. Falling back to " "force kill."
-                )
+                logger.debug("Graceful termination failed. Falling back to force kill.")
                 # If all actors are not able to die gracefully, then kill them.
                 for worker in self.workers:
                     ray.kill(worker.actor)

@@ -14,7 +14,7 @@ from ray._private.parameter import RayParams
 from ray._private.ray_logging import get_worker_log_file_name, configure_log_file

 parser = argparse.ArgumentParser(
-    description=("Parse addresses for the worker " "to connect to.")
+    description=("Parse addresses for the worker to connect to.")
 )
 parser.add_argument(
     "--node-ip-address",

@@ -70,10 +70,10 @@ def init(storage: "Optional[Union[str, Storage]]" = None) -> None:
         # we have to use the 'else' branch because we would raise a
         # runtime error, but we do not want to be captured by 'except'
         if _storage.storage_url == storage.storage_url:
-            logger.warning("Calling 'workflow.init()' again with the same " "storage.")
+            logger.warning("Calling 'workflow.init()' again with the same storage.")
         else:
             raise RuntimeError(
-                "Calling 'workflow.init()' again with a " "different storage"
+                "Calling 'workflow.init()' again with a different storage"
             )
     storage_base.set_global_storage(storage)
     workflow_access.init_management_actor()
@@ -549,9 +549,7 @@ def wait(

     for w in workflows:
         if not isinstance(w, Workflow):
-            raise TypeError(
-                "The input of workflow.wait should be a list " "of workflows."
-            )
+            raise TypeError("The input of workflow.wait should be a list of workflows.")
     wait_inputs = serialization_context.make_workflow_inputs(workflows)
     step_options = WorkflowStepRuntimeOptions.make(
         step_type=StepType.WAIT,

@@ -537,9 +537,7 @@ class Workflow(Generic[T]):
 @PublicAPI(stability="beta")
 class WorkflowNotFoundError(Exception):
     def __init__(self, workflow_id: str):
-        self.message = (
-            f"Workflow[id={workflow_id}] was referenced but " "doesn't exist."
-        )
+        self.message = f"Workflow[id={workflow_id}] was referenced but doesn't exist."
         super().__init__(self.message)


@@ -125,7 +125,7 @@ def _put_helper(
     # nested object refs.
     if isinstance(obj, ray.ObjectRef):
         raise NotImplementedError(
-            "Workflow does not support checkpointing " "nested object references yet."
+            "Workflow does not support checkpointing nested object references yet."
         )
     paths = obj_id_to_paths(workflow_id, identifier)
     promise = dump_to_storage(paths, obj, workflow_id, storage, update_existing=False)

@@ -457,7 +457,7 @@ def _workflow_step_executor(
     if step_type == StepType.READONLY_ACTOR_METHOD:
         if isinstance(volatile_output, Workflow):
             raise TypeError(
-                "Returning a Workflow from a readonly virtual actor " "is not allowed."
+                "Returning a Workflow from a readonly virtual actor is not allowed."
             )
         assert not isinstance(persisted_output, Workflow)
     else:

@@ -67,7 +67,7 @@ def get_global_storage() -> Storage:
     global _global_storage
     if _global_storage is None:
         raise RuntimeError(
-            "`workflow.init()` must be called prior to " "using the workflows API."
+            "`workflow.init()` must be called prior to using the workflows API."
         )
     return _global_storage

@@ -126,7 +126,7 @@ class FilesystemStorageImpl(Storage):
         if self._workflow_root_dir.exists():
             if not self._workflow_root_dir.is_dir():
                 raise ValueError(
-                    f"storage path {workflow_root_dir} must be" " a directory."
+                    f"storage path {workflow_root_dir} must be a directory."
                 )
         else:
             self._workflow_root_dir.mkdir(parents=True)

@@ -221,7 +221,7 @@ def test_get_named_step_default(workflow_start_regular, tmp_path):
     assert math.factorial(5) == factorial.step(5).run("factorial")
     for i in range(5):
         step_name = (
-            "test_basic_workflows_2." "test_get_named_step_default.locals.factorial"
+            "test_basic_workflows_2.test_get_named_step_default.locals.factorial"
         )
         if i != 0:
             step_name += "_" + str(i)
@@ -252,7 +252,7 @@ def test_no_init(shutdown_only):
         pass

     fail_wf_init_error_msg = re.escape(
-        "`workflow.init()` must be called prior to using " "the workflows API."
+        "`workflow.init()` must be called prior to using the workflows API."
     )

     with pytest.raises(RuntimeError, match=fail_wf_init_error_msg):

@@ -173,7 +173,7 @@ class WorkflowManagementActor:
         """
         if workflow_id in self._workflow_outputs and not ignore_existing:
             raise RuntimeError(
-                f"The output of workflow[id={workflow_id}] " "already exists."
+                f"The output of workflow[id={workflow_id}] already exists."
             )
         wf_store = workflow_storage.WorkflowStorage(workflow_id, self._store)
         workflow_prerun_metadata = {"start_time": time.time()}
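Patterns like these can also be caught mechanically. One option (an assumption about tooling, not necessarily what this PR used) is the flake8-implicit-str-concat plugin, whose ISC001 check flags implicitly concatenated string literals on a single line:

    # hypothetical invocation, not taken from this commit
    pip install flake8 flake8-implicit-str-concat
    flake8 --select=ISC001 python/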