import asyncio
import concurrent.futures
import dataclasses
from datetime import datetime
import enum
import logging
import hashlib
import json
import os
from typing import Any, Dict, List, Optional

import aiohttp.web

import ray
from ray.dashboard.consts import RAY_CLUSTER_ACTIVITY_HOOK
import ray.dashboard.optional_utils as dashboard_optional_utils
import ray.dashboard.utils as dashboard_utils
from ray._private import ray_constants
from ray._private.storage import _load_class
from ray.core.generated import gcs_pb2, gcs_service_pb2, gcs_service_pb2_grpc
from ray.dashboard.modules.job.common import JOB_ID_METADATA_KEY, JobInfoStorageClient
from ray.experimental.internal_kv import (
    _internal_kv_get,
    _internal_kv_initialized,
    _internal_kv_list,
)
from ray.job_submission import JobInfo
from ray.runtime_env import RuntimeEnv

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

routes = dashboard_optional_utils.ClassMethodRouteTable
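
# HTTP endpoints served by this module (handlers defined below):
#   GET /api/actors/kill
#   GET /api/snapshot
#   GET /api/component_activities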


class RayActivityStatus(str, enum.Enum):
    ACTIVE = "ACTIVE"
    INACTIVE = "INACTIVE"
    ERROR = "ERROR"


@dataclasses.dataclass
class RayActivityResponse:
    """
    Dataclass used to report whether a particular Ray component can be
    considered active, along with metadata about the observation.
    """

    # Whether the corresponding Ray component is considered active or inactive,
    # or if there was an error while collecting this observation.
    is_active: RayActivityStatus

    # Reason if Ray component is considered active or errored.
    reason: Optional[str] = None

    # Timestamp of when this observation about the Ray component was made.
    timestamp: Optional[float] = None
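
    # Example (illustrative only) of the JSON shape this dataclass serializes to
    # via dataclasses.asdict(), e.g. one entry in the /api/component_activities
    # response:
    #   {"is_active": "ACTIVE",
    #    "reason": "Number of active drivers: 1",
    #    "timestamp": 1656621000.0}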


class APIHead(dashboard_utils.DashboardHeadModule):
    def __init__(self, dashboard_head):
        super().__init__(dashboard_head)
        self._gcs_job_info_stub = None
        self._gcs_actor_info_stub = None
        self._dashboard_head = dashboard_head
        assert _internal_kv_initialized()
        self._job_info_client = JobInfoStorageClient()
        # For offloading CPU intensive work.
        self._thread_pool = concurrent.futures.ThreadPoolExecutor(
            max_workers=2, thread_name_prefix="api_head"
        )
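        # Used below by get_serve_info() / get_session_name() via
        # run_in_executor so blocking internal KV reads do not stall the
        # event loop.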

    @routes.get("/api/actors/kill")
    async def kill_actor_gcs(self, req) -> aiohttp.web.Response:
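        # Example (illustrative) request:
        #   GET /api/actors/kill?actor_id=<hex_actor_id>&force_kill=true&no_restart=true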
        actor_id = req.query.get("actor_id")
        force_kill = req.query.get("force_kill", False) in ("true", "True")
        no_restart = req.query.get("no_restart", False) in ("true", "True")
        if not actor_id:
            return dashboard_optional_utils.rest_response(
                success=False, message="actor_id is required."
            )

        request = gcs_service_pb2.KillActorViaGcsRequest()
        request.actor_id = bytes.fromhex(actor_id)
        request.force_kill = force_kill
        request.no_restart = no_restart
        await self._gcs_actor_info_stub.KillActorViaGcs(request, timeout=5)

        message = (
            f"Force killed actor with id {actor_id}"
            if force_kill
            else f"Requested actor with id {actor_id} to terminate. "
            + "It will exit once running tasks complete"
        )

        return dashboard_optional_utils.rest_response(success=True, message=message)

    @routes.get("/api/snapshot")
    async def snapshot(self, req):
        (
            job_info,
            job_submission_data,
            actor_data,
            serve_data,
            session_name,
        ) = await asyncio.gather(
            self.get_job_info(),
            self.get_job_submission_info(),
            self.get_actor_info(),
            self.get_serve_info(),
            self.get_session_name(),
        )
        snapshot = {
            "jobs": job_info,
            "job_submission": job_submission_data,
            "actors": actor_data,
            "deployments": serve_data,
            "session_name": session_name,
            "ray_version": ray.__version__,
            "ray_commit": ray.__commit__,
        }
        return dashboard_optional_utils.rest_response(
            success=True, message="hello", snapshot=snapshot
        )

    @routes.get("/api/component_activities")
    async def get_component_activities(self, req) -> aiohttp.web.Response:
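        # Example (illustrative) request:
        #   GET /api/component_activities?timeout=5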
        timeout = req.query.get("timeout", None)
        if timeout and timeout.isdigit():
            timeout = int(timeout)
        else:
            timeout = 5

        # Get activity information for drivers.
        driver_activity_info = await self._get_job_activity_info(timeout=timeout)
        resp = {"driver": dataclasses.asdict(driver_activity_info)}
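
        # RAY_CLUSTER_ACTIVITY_HOOK may name an external callable (e.g. a
        # hypothetical "my_module.compute_activity") importable on the head
        # node. It takes no arguments and should return
        # Dict[str, RayActivityResponse]; each entry is validated and merged
        # into the response below.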
        if RAY_CLUSTER_ACTIVITY_HOOK in os.environ:
            try:
                cluster_activity_callable = _load_class(
                    os.environ[RAY_CLUSTER_ACTIVITY_HOOK]
                )
                external_activity_output = cluster_activity_callable()
                assert isinstance(external_activity_output, dict), (
                    f"Output of hook {os.environ[RAY_CLUSTER_ACTIVITY_HOOK]} "
                    "should be Dict[str, RayActivityResponse]. Got "
                    f"output: {external_activity_output}"
                )
                for component_type in external_activity_output:
                    try:
                        component_activity_output = external_activity_output[
                            component_type
                        ]
                        # Cast output to type RayActivityResponse
                        component_activity_output = RayActivityResponse(
                            **dataclasses.asdict(component_activity_output)
                        )
                        # Validate is_active field is of type RayActivityStatus
                        component_activity_output.is_active = RayActivityStatus[
                            component_activity_output.is_active
                        ]
                        resp[component_type] = dataclasses.asdict(
                            component_activity_output
                        )
                    except Exception as e:
                        logger.exception(
                            f"Failed to get activity status of {component_type} "
                            f"from user hook {os.environ[RAY_CLUSTER_ACTIVITY_HOOK]}."
                        )
                        resp[component_type] = {
                            "is_active": RayActivityStatus.ERROR,
                            "reason": repr(e),
                            "timestamp": datetime.now().timestamp(),
                        }
            except Exception as e:
                logger.exception(
                    "Failed to get activity status from user "
                    f"hook {os.environ[RAY_CLUSTER_ACTIVITY_HOOK]}."
                )
                resp["external_component"] = {
                    "is_active": RayActivityStatus.ERROR,
                    "reason": repr(e),
                    "timestamp": datetime.now().timestamp(),
                }

        return aiohttp.web.Response(
            text=json.dumps(resp),
            content_type="application/json",
            status=aiohttp.web.HTTPOk.status_code,
        )

    async def _get_job_activity_info(self, timeout: int) -> RayActivityResponse:
        # Returns whether there is Ray activity from drivers (jobs).
        # Drivers in namespaces that start with _ray_internal_job_info_ are not
        # considered activity.
        try:
            request = gcs_service_pb2.GetAllJobInfoRequest()
            reply = await self._gcs_job_info_stub.GetAllJobInfo(
                request, timeout=timeout
            )

            num_active_drivers = 0
            for job_table_entry in reply.job_info_list:
                is_dead = bool(job_table_entry.is_dead)
                in_internal_namespace = job_table_entry.config.ray_namespace.startswith(
                    JobInfoStorageClient.JOB_DATA_KEY_PREFIX
                )
                if not is_dead and not in_internal_namespace:
                    num_active_drivers += 1

            is_active = (
                RayActivityStatus.ACTIVE
                if num_active_drivers > 0
                else RayActivityStatus.INACTIVE
            )
            return RayActivityResponse(
                is_active=is_active,
                reason=f"Number of active drivers: {num_active_drivers}"
                if num_active_drivers
                else None,
                timestamp=datetime.now().timestamp(),
            )
        except Exception as e:
            logger.exception("Failed to get activity status of Ray drivers.")
            return RayActivityResponse(
                is_active=RayActivityStatus.ERROR,
                reason=repr(e),
                timestamp=datetime.now().timestamp(),
            )

    def _get_job_info(self, metadata: Dict[str, str]) -> Optional[JobInfo]:
        # If a job submission ID has been added to a job, the status is
        # guaranteed to be returned.
        job_submission_id = metadata.get(JOB_ID_METADATA_KEY)
        return self._job_info_client.get_info(job_submission_id)

    async def get_job_info(self):
        """Return info for each job. Here a job is a Ray driver."""
        request = gcs_service_pb2.GetAllJobInfoRequest()
        reply = await self._gcs_job_info_stub.GetAllJobInfo(request, timeout=5)

        jobs = {}
        for job_table_entry in reply.job_info_list:
            job_id = job_table_entry.job_id.hex()
            metadata = dict(job_table_entry.config.metadata)
            config = {
                "namespace": job_table_entry.config.ray_namespace,
                "metadata": metadata,
                "runtime_env": RuntimeEnv.deserialize(
                    job_table_entry.config.runtime_env_info.serialized_runtime_env
                ),
            }
            info = self._get_job_info(metadata)
            entry = {
                "status": None if info is None else info.status,
                "status_message": None if info is None else info.message,
                "is_dead": job_table_entry.is_dead,
                "start_time": job_table_entry.start_time,
                "end_time": job_table_entry.end_time,
                "config": config,
            }
            jobs[job_id] = entry

        return jobs

    async def get_job_submission_info(self):
        """Info for Ray job submission. Here a job can have 0 or many drivers."""

        jobs = {}

        for job_submission_id, job_info in self._job_info_client.get_all_jobs().items():
            if job_info is not None:
                entry = {
                    "job_submission_id": job_submission_id,
                    "status": job_info.status,
                    "message": job_info.message,
                    "error_type": job_info.error_type,
                    "start_time": job_info.start_time,
                    "end_time": job_info.end_time,
                    "metadata": job_info.metadata,
                    "runtime_env": job_info.runtime_env,
                    "entrypoint": job_info.entrypoint,
                }
                jobs[job_submission_id] = entry
        return jobs

    async def get_actor_info(self):
        # TODO (Alex): GCS still needs to return actors from dead jobs.
        request = gcs_service_pb2.GetAllActorInfoRequest()
        request.show_dead_jobs = True
        reply = await self._gcs_actor_info_stub.GetAllActorInfo(request, timeout=5)
        actors = {}
        for actor_table_entry in reply.actor_table_data:
            actor_id = actor_table_entry.actor_id.hex()
            runtime_env = json.loads(actor_table_entry.serialized_runtime_env)
            entry = {
                "job_id": actor_table_entry.job_id.hex(),
                "state": gcs_pb2.ActorTableData.ActorState.Name(
                    actor_table_entry.state
                ),
                "name": actor_table_entry.name,
                "namespace": actor_table_entry.ray_namespace,
                "runtime_env": runtime_env,
                "start_time": actor_table_entry.start_time,
                "end_time": actor_table_entry.end_time,
                "is_detached": actor_table_entry.is_detached,
                "resources": dict(actor_table_entry.required_resources),
                "actor_class": actor_table_entry.class_name,
                "current_worker_id": actor_table_entry.address.worker_id.hex(),
                "current_raylet_id": actor_table_entry.address.raylet_id.hex(),
                "ip_address": actor_table_entry.address.ip_address,
                "port": actor_table_entry.address.port,
                "metadata": dict(),
            }
            actors[actor_id] = entry

        deployments = await self.get_serve_info()
        for _, deployment_info in deployments.items():
            for replica_actor_id, actor_info in deployment_info["actors"].items():
                if replica_actor_id in actors:
                    serve_metadata = dict()
                    serve_metadata["replica_tag"] = actor_info["replica_tag"]
                    serve_metadata["deployment_name"] = deployment_info["name"]
                    serve_metadata["version"] = actor_info["version"]
                    actors[replica_actor_id]["metadata"]["serve"] = serve_metadata
        return actors

    async def get_serve_info(self) -> Dict[str, Any]:
        # Conditionally import serve to prevent ModuleNotFoundError from serve
        # dependencies when only ray[default] is installed (#17712)
        try:
            from ray.serve.constants import SERVE_CONTROLLER_NAME
            from ray.serve.controller import SNAPSHOT_KEY as SERVE_SNAPSHOT_KEY
        except Exception:
            return {}

        # Serve wraps Ray's internal KV store and specially formats the keys.
        # These are the keys we are interested in:
        # SERVE_CONTROLLER_NAME(+ optional random letters):SERVE_SNAPSHOT_KEY
        # TODO: Convert to async GRPC, if CPU usage is not a concern.
        def get_deployments():
            serve_keys = _internal_kv_list(
                SERVE_CONTROLLER_NAME, namespace=ray_constants.KV_NAMESPACE_SERVE
            )
            serve_snapshot_keys = filter(
                lambda k: SERVE_SNAPSHOT_KEY in str(k), serve_keys
            )

            deployments_per_controller: List[Dict[str, Any]] = []
            for key in serve_snapshot_keys:
                val_bytes = _internal_kv_get(
                    key, namespace=ray_constants.KV_NAMESPACE_SERVE
                ) or "{}".encode("utf-8")
                deployments_per_controller.append(json.loads(val_bytes.decode("utf-8")))
            # Merge the deployments dicts of all controllers.
            deployments: Dict[str, Any] = {
                k: v for d in deployments_per_controller for k, v in d.items()
            }
            # Replace the keys (deployment names) with their hashes to prevent
            # collisions caused by the automatic conversion to camelcase by the
            # dashboard agent.
            return {
                hashlib.sha1(name.encode()).hexdigest(): info
                for name, info in deployments.items()
            }

        return await asyncio.get_event_loop().run_in_executor(
            executor=self._thread_pool, func=get_deployments
        )

    async def get_session_name(self):
        # TODO(yic): Convert to async GRPC.
        def get_session():
            return ray.experimental.internal_kv._internal_kv_get(
                "session_name", namespace=ray_constants.KV_NAMESPACE_SESSION
            ).decode()

        return await asyncio.get_event_loop().run_in_executor(
            executor=self._thread_pool, func=get_session
        )

    async def run(self, server):
        self._gcs_job_info_stub = gcs_service_pb2_grpc.JobInfoGcsServiceStub(
            self._dashboard_head.aiogrpc_gcs_channel
        )
        self._gcs_actor_info_stub = gcs_service_pb2_grpc.ActorInfoGcsServiceStub(
            self._dashboard_head.aiogrpc_gcs_channel
        )

    @staticmethod
    def is_minimal_module():
        return False