[ci/release] Always use full cluster address (#23067)

Using the short cluster address (i.e., omitting the project name) is deprecated and breaks Ray Job usage for uploads/downloads; see this failing build: https://buildkite.com/ray-project/release-tests-branch/builds/135#2a03e47b-6a9a-42ff-9346-905725eb8d09
Kai Fricke 2022-03-11 16:31:21 +00:00 committed by GitHub
parent 07372927cc
commit a8bed94ed6
8 changed files with 32 additions and 27 deletions
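
For context on the fix: the release tooling builds Anyscale addresses in two forms, and only the project-qualified one keeps working with Ray Jobs. A minimal sketch of the difference (the project and cluster names are hypothetical):

    project_name = "release-tests"   # hypothetical
    cluster_name = "nightly-run-42"  # hypothetical

    # Deprecated short form, previously returned by get_cluster_address(full=False);
    # breaks upload/download handling when used as RAY_ADDRESS for `ray job submit`:
    short_address = f"anyscale://{cluster_name}"

    # Full form, now returned unconditionally by get_cluster_address():
    full_address = f"anyscale://{project_name}/{cluster_name}"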

@@ -1,4 +1,4 @@
-cloud_id: cld_17WvYIBBkdgLwEUNcLeRAE
+cloud_id: {{env["ANYSCALE_CLOUD_ID"]}}
 region: us-west-2
 aws:
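
The hardcoded cloud ID becomes a Jinja2 placeholder resolved from the environment (the template-loader fix further below makes that resolution actually work). A minimal rendering sketch with a hypothetical cloud ID:

    import jinja2
    import yaml

    content = 'cloud_id: {{env["ANYSCALE_CLOUD_ID"]}}\nregion: us-west-2'
    rendered = jinja2.Template(content).render(
        env={"ANYSCALE_CLOUD_ID": "cld_example123"}  # hypothetical value
    )
    print(yaml.safe_load(rendered))  # {'cloud_id': 'cld_example123', 'region': 'us-west-2'}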

@@ -1,4 +1,5 @@
 import copy
+import os
 from typing import Optional, Dict

 from ray_release.buildkite.concurrency import CONCURRENY_GROUPS, get_concurrency_group
@@ -47,7 +48,11 @@ def get_step(
     step = copy.deepcopy(DEFAULT_STEP_TEMPLATE)

-    cmd = f"./release/run_release_test.sh \"{test['name']}\" --report"
+    cmd = f"./release/run_release_test.sh \"{test['name']}\" "
+
+    if not bool(int(os.environ.get("NO_REPORT_OVERRIDE", "0"))):
+        cmd += " --report"

     if smoke_test:
         cmd += " --smoke-test"

@@ -70,7 +70,7 @@ class ClusterManager(abc.ABC):
     def terminate_cluster(self):
         raise NotImplementedError

-    def get_cluster_address(self, full: bool = True) -> str:
+    def get_cluster_address(self) -> str:
         raise NotImplementedError

     def get_cluster_url(self) -> Optional[str]:

@@ -125,9 +125,3 @@ class FullClusterManager(MinimalClusterManager):
             while result.result.state != "Terminated":
                 time.sleep(1)
                 result = self.sdk.get_cluster(self.cluster_id)
-
-    def get_cluster_address(self, full: bool = True) -> str:
-        if full:
-            return f"anyscale://{self.project_name}/{self.cluster_name}"
-        else:
-            return f"anyscale://{self.cluster_name}"

@@ -287,5 +287,5 @@ class MinimalClusterManager(ClusterManager):
     def terminate_cluster(self):
         pass

-    def get_cluster_address(self, full: bool = True) -> str:
-        return f"anyscale://{self.cluster_name}"
+    def get_cluster_address(self) -> str:
+        return f"anyscale://{self.project_name}/{self.cluster_name}"

@@ -178,7 +178,7 @@ def load_and_render_yaml_template(
     render_env.update(env)

     try:
-        content = jinja2.Template(content).render(env=env)
+        content = jinja2.Template(content).render(env=render_env)
         return yaml.safe_load(content)
     except Exception as e:
         raise ReleaseTestConfigError(
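
The one-word fix matters because render_env is the merged dictionary, updated with the caller's env, while env alone silently drops variables such as ANYSCALE_CLOUD_ID. A minimal sketch of the difference, assuming (as the surrounding code suggests) that render_env is seeded from the process environment:

    import jinja2

    env = {}                                              # caller passed no overrides
    render_env = {"ANYSCALE_CLOUD_ID": "cld_example123"}  # seeded from os.environ upstream
    render_env.update(env)

    template = jinja2.Template('{{env["ANYSCALE_CLOUD_ID"]}}')
    print(repr(template.render(env=env)))         # old: '' -- variable silently missing
    print(repr(template.render(env=render_env)))  # fixed: 'cld_example123'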

@@ -19,17 +19,26 @@ class JobManager:
         self.job_client = None
         self.last_job_id = None

+    def _get_job_client(self) -> JobSubmissionClient:
+        if not self.job_client:
+            self.job_client = JobSubmissionClient(
+                self.cluster_manager.get_cluster_address()
+            )
+        return self.job_client
+
     def _run_job(self, cmd_to_run, env_vars) -> int:
         self.counter += 1
         command_id = self.counter

         env = os.environ.copy()
-        env["RAY_ADDRESS"] = self.cluster_manager.get_cluster_address(full=False)
+        env["RAY_ADDRESS"] = self.cluster_manager.get_cluster_address()
         env.setdefault("ANYSCALE_HOST", ANYSCALE_HOST)

         full_cmd = " ".join(f"{k}={v}" for k, v in env_vars.items()) + " " + cmd_to_run
         logger.info(f"Executing {cmd_to_run} with {env_vars} via ray job submit")
-        job_id = self.job_client.submit_job(
+        job_client = self._get_job_client()
+        job_id = job_client.submit_job(
             # Entrypoint shell command to execute
             entrypoint=full_cmd,
         )
@@ -39,6 +48,8 @@ class JobManager:
         return command_id

     def _wait_job(self, command_id: int, timeout: int):
+        job_client = self._get_job_client()
+
         start_time = time.monotonic()
         timeout_at = start_time + timeout
         next_status = start_time + 30
@@ -56,11 +67,11 @@
                     f"({int(now - start_time)} seconds) ..."
                 )
                 next_status += 30
-            status = self.job_client.get_job_status(self.job_id_pool[command_id])
+            status = job_client.get_job_status(self.job_id_pool[command_id])
             if status in {JobStatus.SUCCEEDED, JobStatus.STOPPED, JobStatus.FAILED}:
                 break
             time.sleep(1)

-        status = self.job_client.get_job_status(self.job_id_pool[command_id])
+        status = job_client.get_job_status(self.job_id_pool[command_id])
         # TODO(sang): Propagate JobInfo.error_type
         if status == JobStatus.SUCCEEDED:
             retcode = 0
@@ -69,18 +80,13 @@
         duration = time.time() - self.start_time[command_id]
         return retcode, duration

-    def run_and_wait(self, cmd_to_run, env_vars, timeout: int = 120) -> Tuple[int, int]:
-        if not self.job_client:
-            self.job_client = JobSubmissionClient(
-                self.cluster_manager.get_cluster_address(full=False)
-            )
+    def run_and_wait(
+        self, cmd_to_run, env_vars, timeout: int = 120
+    ) -> Tuple[int, float]:
         cid = self._run_job(cmd_to_run, env_vars)
         return self._wait_job(cid, timeout)

     def get_last_logs(self):
-        # return None
-        if not self.job_client:
-            self.job_client = JobSubmissionClient(
-                self.cluster_manager.get_cluster_address(full=False)
-            )
-        return self.job_client.get_job_logs(self.last_job_id)
+        job_client = self._get_job_client()
+        return job_client.get_job_logs(self.last_job_id)
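
A usage sketch of the manager after this change; the wiring is hypothetical (the constructor arguments and the workload command are illustrative), but the submission client is now always built lazily from the full cluster address via _get_job_client():

    # Hypothetical driver code, mirroring how the release tooling uses JobManager:
    job_manager = JobManager(cluster_manager)   # cluster_manager configured elsewhere
    retcode, duration = job_manager.run_and_wait(
        "python workload.py",                   # hypothetical entrypoint
        {"RUN_ID": "nightly-42"},               # env vars prefixed onto the command
        timeout=1800,
    )
    if retcode != 0:
        print(job_manager.get_last_logs())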

@@ -2423,7 +2423,7 @@
       file_manager: job

   smoke_test:
-    frequency: nightly
+    frequency: multi
     cluster:
       cluster_env: dask_on_ray/large_scale_dask_on_ray_app_config.yaml