diff --git a/python/ray/autoscaler/_private/aws/config.py b/python/ray/autoscaler/_private/aws/config.py
index 83b25d116..babfa9e5b 100644
--- a/python/ray/autoscaler/_private/aws/config.py
+++ b/python/ray/autoscaler/_private/aws/config.py
@@ -337,11 +337,7 @@ def _configure_key_pair(config):
             "No matching local key file for any of the key pairs in this "
             "account with ids from 0..{}. "
             "Consider deleting some unused keys pairs from your account.",
-            key_name)  # todo: err msg
-        raise ValueError(
-            "No matching local key file for any of the key pairs in this "
-            "account with ids from 0..{}. ".format(key_name) +
-            "Consider deleting some unused keys pairs from your account.")
+            key_name)
     cli_logger.doassert(
         os.path.exists(key_path), "Private key file " + cf.bold("{}") +
@@ -391,13 +387,8 @@ def _configure_subnet(config):
             "and trying this again.\n"
             "Note that the subnet must map public IPs "
             "on instance launch unless you set `use_internal_ips: true` in "
-            "the `provider` config.")  # todo: err msg
-        raise Exception(
-            "No usable subnets found, try manually creating an instance in "
-            "your specified region to populate the list of subnets "
-            "and trying this again. Note that the subnet must map public IPs "
-            "on instance launch unless you set 'use_internal_ips': True in "
-            "the 'provider' config.")
+            "the `provider` config.")
+
     if "availability_zone" in config["provider"]:
         azs = config["provider"]["availability_zone"].split(",")
         subnets = [s for s in subnets if s.availability_zone in azs]
@@ -407,13 +398,7 @@ def _configure_subnet(config):
                 "Choose a different availability zone or try "
                 "manually creating an instance in your specified region "
                 "to populate the list of subnets and trying this again.",
-                config["provider"]["availability_zone"])  # todo: err msg
-            raise Exception(
-                "No usable subnets matching availability zone {} "
-                "found. Choose a different availability zone or try "
-                "manually creating an instance in your specified region "
-                "to populate the list of subnets and trying this again.".
-                format(config["provider"]["availability_zone"]))
+                config["provider"]["availability_zone"])
 
     # Use subnets in only one VPC, so that _configure_security_groups only
     # needs to create a security group in this one VPC. Otherwise, we'd need
diff --git a/python/ray/autoscaler/_private/aws/node_provider.py b/python/ray/autoscaler/_private/aws/node_provider.py
index 238c9c887..ca4649ec1 100644
--- a/python/ray/autoscaler/_private/aws/node_provider.py
+++ b/python/ray/autoscaler/_private/aws/node_provider.py
@@ -399,12 +399,12 @@ class AWSNodeProvider(NodeProvider):
                 break
             except botocore.exceptions.ClientError as exc:
                 if attempt == BOTO_CREATE_MAX_RETRIES:
-                    # todo: err msg
                     cli_logger.abort(
-                        "Failed to launch instances. Max attempts exceeded.")
-                    raise exc
+                        "Failed to launch instances. Max attempts exceeded.",
+                        exc=exc,
+                    )
                 else:
-                    cli_logger.print(
+                    cli_logger.warning(
                         "create_instances: Attempt failed with {}, retrying.",
                         exc)
         return created_nodes_dict
diff --git a/python/ray/autoscaler/_private/cli_logger.py b/python/ray/autoscaler/_private/cli_logger.py
index 9f74f6e9a..8616e41a2 100644
--- a/python/ray/autoscaler/_private/cli_logger.py
+++ b/python/ray/autoscaler/_private/cli_logger.py
@@ -610,7 +610,10 @@ class _CliLogger():
         exc_cls = click.ClickException
         if self.pretty:
             exc_cls = SilentClickException
-        raise exc_cls("Exiting due to cli_logger.abort()")
+
+        if msg is None:
+            msg = "Exiting due to cli_logger.abort()"
+        raise exc_cls(msg)
 
     def doassert(self, val: bool, msg: str, *args: Any, **kwargs: Any):
         """Handle assertion without throwing a scary exception.
diff --git a/python/ray/autoscaler/_private/commands.py b/python/ray/autoscaler/_private/commands.py
index 166966929..a02bc386a 100644
--- a/python/ray/autoscaler/_private/commands.py
+++ b/python/ray/autoscaler/_private/commands.py
@@ -190,7 +190,6 @@ def create_or_update_cluster(
         cli_logger.abort(
             "Provided cluster configuration file ({}) does not exist",
             cf.bold(config_file))
-        raise
     except yaml.parser.ParserError as e:
         handle_yaml_error(e)
         raise
@@ -211,8 +210,6 @@ def create_or_update_cluster(
                 k for k in _NODE_PROVIDERS.keys()
                 if _NODE_PROVIDERS[k] is not None
             ]))
-        raise NotImplementedError("Unsupported provider {}".format(
-            config["provider"]))
 
     printed_overrides = False
@@ -644,9 +641,8 @@ def get_or_create_head_node(config: Dict[str, Any],
     with cli_logger.group("Fetching the new head node"):
         while True:
             if time.time() - start > 50:
-                cli_logger.abort(
-                    "Head node fetch timed out.")  # todo: msg
-                raise RuntimeError("Failed to create head node.")
+                cli_logger.abort("Head node fetch timed out. "
+                                 "Failed to create head node.")
             nodes = provider.non_terminated_nodes(head_node_tags)
             if len(nodes) == 1:
                 head_node = nodes[0]
diff --git a/python/ray/autoscaler/_private/updater.py b/python/ray/autoscaler/_private/updater.py
index d0ec5b8f1..554cc5265 100644
--- a/python/ray/autoscaler/_private/updater.py
+++ b/python/ray/autoscaler/_private/updater.py
@@ -120,7 +120,6 @@ class NodeUpdater:
                    "Either do not pass `--redirect-command-output` "
                    "or also pass in `--use-normal-shells`.")
            cli_logger.abort(msg)
-            raise click.ClickException(msg)
 
        try:
            with LogTimer(self.log_prefix +
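
Taken together, these hunks converge on one pattern: `cli_logger.abort()` both logs the error and raises, so callers no longer need a redundant `raise` after it. Below is a minimal, self-contained sketch of that pattern, not the actual Ray implementation: the class and helper names are stand-ins, `pretty` and `_error` mirror fields visible in the hunks, and the `exc` keyword handling is an assumption inferred from the `exc=exc` call site in `node_provider.py` (its implementation is not part of this diff).

```python
import click


class SilentClickException(click.ClickException):
    """Stand-in for the autoscaler's SilentClickException.

    Assumed behavior: suppress click's default "Error: ..." output,
    since pretty mode already printed a formatted error.
    """

    def show(self, file=None):
        pass


class _CliLoggerSketch:
    """Minimal sketch of the abort() behavior this diff converges on."""

    pretty = True

    def _error(self, msg, *args, **kwargs):
        # Placeholder for the real formatted error output.
        print("Error:", msg.format(*args))

    def abort(self, msg=None, *args, exc=None, **kwargs):
        """Log the error (if a message is given) and always raise."""
        if msg is not None:
            self._error(msg, *args, **kwargs)

        exc_cls = SilentClickException if self.pretty else click.ClickException
        if msg is None:
            msg = "Exiting due to cli_logger.abort()"
        # `exc` handling is assumed: chain the original exception (if any)
        # so its traceback is preserved.
        raise exc_cls(msg) from exc


cli_logger = _CliLoggerSketch()

# Caller pattern after this diff: no separate `raise` following abort().
try:
    cli_logger.abort("Head node fetch timed out. Failed to create head node.")
except click.ClickException as e:
    print("caught:", e.message)
```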