Remove the old web UI (#4301)

Philipp Moritz 2019-03-07 23:15:11 -08:00 committed by Robert Nishihara
parent 4c80177d6f
commit 95254b3d71
12 changed files with 6 additions and 976 deletions


@@ -507,7 +507,6 @@ filegroup(
"python/ray/dashboard/res/main.js",
"python/ray/experimental/*.py",
"python/ray/internal/*.py",
"python/ray/WebUI.ipynb",
"python/ray/workers/default_worker.py",
]),
)


@@ -75,7 +75,7 @@ for ((i=0; i<${#PY_VERSIONS[@]}; ++i)); do
$PIP_CMD install -q wheel
# Add the correct Python to the path and build the wheel. This is only
# needed so that the installation finds the cython executable.
INCLUDE_UI=1 PATH=$MACPYTHON_PY_PREFIX/$PY_MM/bin:$PATH $PYTHON_EXE setup.py bdist_wheel
PATH=$MACPYTHON_PY_PREFIX/$PY_MM/bin:$PATH $PYTHON_EXE setup.py bdist_wheel
mv dist/*.whl ../.whl/
popd
done


@@ -39,7 +39,7 @@ for ((i=0; i<${#PYTHONS[@]}; ++i)); do
# Fix the numpy version because this will be the oldest numpy version we can
# support.
/opt/python/${PYTHON}/bin/pip install -q numpy==${NUMPY_VERSION} cython==0.29.0
INCLUDE_UI=1 PATH=/opt/python/${PYTHON}/bin:$PATH /opt/python/${PYTHON}/bin/python setup.py bdist_wheel
PATH=/opt/python/${PYTHON}/bin:$PATH /opt/python/${PYTHON}/bin/python setup.py bdist_wheel
# In the future, run auditwheel here.
mv dist/*.whl ../.whl/
popd


@@ -1,97 +0,0 @@
{
"cells": [{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Ray UI\n", "\n",
"Start the UI with **Kernel -> Restart and Run All**."
]
}, {
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n", "import ray\n",
"import ray.experimental.ui as ui\n", "\n",
"ray.init(redis_address=os.environ[\"REDIS_ADDRESS\"])"
]
}, {
"cell_type": "markdown",
"metadata": {},
"source": ["#### Task trace timeline."]
}, {
"cell_type": "markdown",
"metadata": {},
"source": [
"To view arrows, go to View Options and select Flow Events."
]
}, {
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": ["ui.task_timeline()"]
}, {
"cell_type": "markdown",
"metadata": {},
"source": ["#### Object transfer timeline."]
}, {
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": ["ui.object_transfer_timeline()"]
}, {
"cell_type": "markdown",
"metadata": {},
"source": ["#### Task durations."]
}, {
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": ["ui.task_completion_time_distribution()"]
}, {
"cell_type": "markdown",
"metadata": {},
"source": ["#### CPU usage."]
}, {
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": ["ui.cpu_usage()"]
}, {
"cell_type": "markdown",
"metadata": {},
"source": ["#### Cluster usage."]
}, {
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": ["ui.cluster_usage()"]
}],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

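The notebook above was meant to be opened interactively ("Kernel -> Restart and Run All"), but a notebook like it can also be executed headlessly. Below is a minimal sketch using nbformat and nbconvert; the file paths are hypothetical, and it assumes jupyter is installed and that the environment the notebook expects (a running cluster with REDIS_ADDRESS exported) is in place.

# Sketch: run every cell of a WebUI-style notebook without a browser.
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

nb = nbformat.read("WebUI.ipynb", as_version=4)  # hypothetical local copy
ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
ep.preprocess(nb, {"metadata": {"path": "."}})   # executes cells in order
nbformat.write(nb, "WebUI.executed.ipynb")       # keeps the rendered output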

@@ -1,702 +0,0 @@
import logging
import numpy as np
import os
import pprint
import shutil
import tempfile
import time
import ipywidgets as widgets
from IPython.display import display, IFrame, clear_output
import ray
logger = logging.getLogger(__name__)
# Instances of this class keep track of whether or not a
# callback is currently executing. Since the execution of the callback
# may trigger more calls to the callback, this is used to prevent infinite
# recursions.
class _EventRecursionContextManager(object):
def __init__(self):
self.should_recurse = True
def __enter__(self):
self.should_recurse = False
def __exit__(self, *args):
self.should_recurse = True
total_time_value = "% total time"
total_tasks_value = "% total tasks"
# Function that returns instances of sliders and handles associated events.
def get_sliders(update):
# Start_box value indicates the desired start point of the queried window.
start_box = widgets.FloatText(
description="Start Time:",
disabled=True,
)
# End_box value indicates the desired end point of the queried window.
end_box = widgets.FloatText(
description="End Time:",
disabled=True,
)
# Percentage slider. Indicates either % of total time or total tasks
# depending on what breakdown_opt is set to.
range_slider = widgets.IntRangeSlider(
value=[0, 100],
min=0,
max=100,
step=1,
description="%:",
continuous_update=False,
orientation="horizontal",
readout=True,
)
# Indicates the number of tasks that the user wants to be returned. Is
# disabled when the breakdown_opt value is set to total_time_value.
num_tasks_box = widgets.IntText(description="Num Tasks:", disabled=False)
# Dropdown bar that lets the user choose between modifying % of total
# time or total number of tasks.
breakdown_opt = widgets.Dropdown(
options=[total_time_value, total_tasks_value],
value=total_tasks_value,
description="Selection Options:")
# Display box for layout.
total_time_box = widgets.VBox([start_box, end_box])
# This sets the CSS style display to hide the box.
total_time_box.layout.display = 'none'
# Initially passed in to the update_wrapper function.
INIT_EVENT = "INIT"
# Create instance of context manager to determine whether callback is
# currently executing
out_recursion = _EventRecursionContextManager()
def update_wrapper(event):
# Feature received a callback, but it shouldn't be executed
# because the callback was the result of a different feature
# executing its callback based on user input.
if not out_recursion.should_recurse:
return
# Feature received a callback and it should be executed because
# the callback was the result of user input.
with out_recursion:
smallest, largest, num_tasks = ray.global_state._job_length()
diff = largest - smallest
if num_tasks != 0:
# Describes the initial values that the slider/text box
# values should be set to.
if event == INIT_EVENT:
if breakdown_opt.value == total_tasks_value:
num_tasks_box.value = -min(10000, num_tasks)
range_slider.value = (int(
100 - (100. * -num_tasks_box.value) / num_tasks),
100)
else:
low, high = map(lambda x: x / 100., range_slider.value)
start_box.value = round(diff * low, 2)
end_box.value = round(diff * high, 2)
# Event was triggered by a change in the start_box value.
elif event["owner"] == start_box:
if start_box.value > end_box.value:
start_box.value = end_box.value
elif start_box.value < 0:
start_box.value = 0
low, high = range_slider.value
range_slider.value = (int((start_box.value * 100.) / diff),
high)
# Event was triggered by a change in the end_box value.
elif event["owner"] == end_box:
if start_box.value > end_box.value:
end_box.value = start_box.value
elif end_box.value > diff:
end_box.value = diff
low, high = range_slider.value
range_slider.value = (low,
int((end_box.value * 100.) / diff))
# Event was triggered by a change in the breakdown options
# toggle.
elif event["owner"] == breakdown_opt:
if breakdown_opt.value == total_tasks_value:
start_box.disabled = True
end_box.disabled = True
num_tasks_box.disabled = False
total_time_box.layout.display = 'none'
# Make CSS display go back to the default settings.
num_tasks_box.layout.display = None
num_tasks_box.value = min(10000, num_tasks)
range_slider.value = (int(
100 - (100. * num_tasks_box.value) / num_tasks),
100)
else:
start_box.disabled = False
end_box.disabled = False
num_tasks_box.disabled = True
# Make CSS display go back to the default settings.
total_time_box.layout.display = None
num_tasks_box.layout.display = 'none'
range_slider.value = (
int((start_box.value * 100.) / diff),
int((end_box.value * 100.) / diff))
# Event was triggered by a change in the range_slider
# value.
elif event["owner"] == range_slider:
low, high = map(lambda x: x / 100., range_slider.value)
if breakdown_opt.value == total_tasks_value:
old_low, old_high = event["old"]
new_low, new_high = event["new"]
if old_low != new_low:
range_slider.value = (new_low, 100)
num_tasks_box.value = (
-(100. - new_low) / 100. * num_tasks)
else:
range_slider.value = (0, new_high)
num_tasks_box.value = new_high / 100. * num_tasks
else:
start_box.value = round(diff * low, 2)
end_box.value = round(diff * high, 2)
# Event was triggered by a change in the num_tasks_box
# value.
elif event["owner"] == num_tasks_box:
if num_tasks_box.value > 0:
range_slider.value = (
0, int(
100 * float(num_tasks_box.value) / num_tasks))
elif num_tasks_box.value < 0:
range_slider.value = (100 + int(
100 * float(num_tasks_box.value) / num_tasks), 100)
# Get updated values from a slider or text box, and update the rest of
# them accordingly.
range_slider.observe(update_wrapper, names="value")
breakdown_opt.observe(update_wrapper, names="value")
start_box.observe(update_wrapper, names="value")
end_box.observe(update_wrapper, names="value")
num_tasks_box.observe(update_wrapper, names="value")
# Initializes the sliders
update_wrapper(INIT_EVENT)
# Display sliders and search boxes
display(breakdown_opt,
widgets.HBox([range_slider, total_time_box, num_tasks_box]))
# Return the sliders and text boxes
return start_box, end_box, range_slider, breakdown_opt
def object_search_bar():
object_search = widgets.Text(
value="",
placeholder="Object ID",
description="Search for an object:",
disabled=False)
display(object_search)
def handle_submit(sender):
pp = pprint.PrettyPrinter()
pp.pprint(ray.global_state.object_table(object_search.value))
object_search.on_submit(handle_submit)
def task_search_bar():
task_search = widgets.Text(
value="",
placeholder="Task ID",
description="Search for a task:",
disabled=False)
display(task_search)
def handle_submit(sender):
pp = pprint.PrettyPrinter()
pp.pprint(ray.global_state.task_table(task_search.value))
task_search.on_submit(handle_submit)
# Hard limit on the number of tasks to return to the UI client at once
MAX_TASKS_TO_VISUALIZE = 10000
# Helper function that guarantees unique and writeable temp files.
# Prevents clashes in task trace files when multiple notebooks are running.
def _get_temp_file_path(**kwargs):
temp_file = tempfile.NamedTemporaryFile(
delete=False, dir=os.getcwd(), **kwargs)
temp_file_path = temp_file.name
temp_file.close()
return os.path.relpath(temp_file_path)
def task_timeline():
# Check that the trace viewer renderer file is present, and copy it to the
# current working directory if it is not present.
if not os.path.exists("trace_viewer_full.html"):
shutil.copy(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../core/src/catapult_files/trace_viewer_full.html"),
"trace_viewer_full.html")
trace_viewer_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../core/src/catapult_files/index.html")
html_file_path = _get_temp_file_path(suffix=".html")
json_file_path = _get_temp_file_path(suffix=".json")
ray.global_state.chrome_tracing_dump(filename=json_file_path)
with open(trace_viewer_path) as f:
data = f.read()
# Replace the demo data path with our own
# https://github.com/catapult-project/catapult/blob/
# 33a9271eb3cf5caf925293ec6a4b47c94f1ac968/tracing/bin/index.html#L107
data = data.replace("../test_data/big_trace.json", json_file_path)
with open(html_file_path, "w+") as f:
f.write(data)
# Display the task trace within the Jupyter notebook
clear_output(wait=True)
logger.info("To view fullscreen, open chrome://tracing in Google Chrome "
"and load `{}`".format(os.path.abspath(json_file_path)))
display(IFrame(html_file_path, 900, 800))
def object_transfer_timeline():
# Check that the trace viewer renderer file is present, and copy it to the
# current working directory if it is not present.
if not os.path.exists("trace_viewer_full.html"):
shutil.copy(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../core/src/catapult_files/trace_viewer_full.html"),
"trace_viewer_full.html")
trace_viewer_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../core/src/catapult_files/index.html")
html_file_path = _get_temp_file_path(suffix=".html")
json_file_path = _get_temp_file_path(suffix=".json")
ray.global_state.chrome_tracing_object_transfer_dump(
filename=json_file_path)
with open(trace_viewer_path) as f:
data = f.read()
# Replace the demo data path with our own
# https://github.com/catapult-project/catapult/blob/
# 33a9271eb3cf5caf925293ec6a4b47c94f1ac968/tracing/bin/index.html#L107
data = data.replace("../test_data/big_trace.json", json_file_path)
with open(html_file_path, "w+") as f:
f.write(data)
# Display the object transfer trace within the Jupyter notebook
clear_output(wait=True)
logger.info("To view fullscreen, open chrome://tracing in Google Chrome "
"and load `{}`".format(os.path.abspath(json_file_path)))
display(IFrame(html_file_path, 900, 800))
def task_completion_time_distribution():
from bokeh.models import ColumnDataSource
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show, helpers
from bokeh.io import output_notebook, push_notebook
from bokeh.resources import CDN
output_notebook(resources=CDN)
# Create the Bokeh plot
p = figure(
title="Task Completion Time Distribution",
tools=["save", "hover", "wheel_zoom", "box_zoom", "pan"],
background_fill_color="#FFFFFF",
x_range=(0, 1),
y_range=(0, 1))
# Create the data source that the plot pulls from
source = ColumnDataSource(data={"top": [], "left": [], "right": []})
# Plot the histogram rectangles
p.quad(
top="top",
bottom=0,
left="left",
right="right",
source=source,
fill_color="#B3B3B3",
line_color="#033649")
# Label the plot axes
p.xaxis.axis_label = "Duration in seconds"
p.yaxis.axis_label = "Number of tasks"
handle = show(
gridplot(
p,
ncols=1,
plot_width=500,
plot_height=500,
toolbar_location="below"),
notebook_handle=True)
# Function to update the plot
def task_completion_time_update(abs_earliest, abs_latest, abs_num_tasks,
tasks):
if len(tasks) == 0:
return
# Create the distribution to plot
distr = []
for task_id, data in tasks.items():
distr.append(data["store_outputs_end"] -
data["get_arguments_start"])
# Create a histogram from the distribution
top, bin_edges = np.histogram(distr, bins="auto")
left = bin_edges[:-1]
right = bin_edges[1:]
source.data = {"top": top, "left": left, "right": right}
# Set the x and y ranges
x_range = (min(left) if len(left) else 0, max(right)
if len(right) else 1)
y_range = (0, max(top) + 1 if len(top) else 1)
x_range = helpers._get_range(x_range)
p.x_range.start = x_range.start
p.x_range.end = x_range.end
y_range = helpers._get_range(y_range)
p.y_range.start = y_range.start
p.y_range.end = y_range.end
# Push updates to the plot
push_notebook(handle=handle)
get_sliders(task_completion_time_update)
def compute_utilizations(abs_earliest,
abs_latest,
num_tasks,
tasks,
num_buckets,
use_abs_times=False):
if len(tasks) == 0:
return [], [], []
if use_abs_times:
earliest_time = abs_earliest
latest_time = abs_latest
else:
# Determine what the earliest and latest tasks are out of the ones
# that are passed in
earliest_time = time.time()
latest_time = 0
for task_id, data in tasks.items():
latest_time = max((latest_time, data["store_outputs_end"]))
earliest_time = min((earliest_time, data["get_arguments_start"]))
# Add some epsilon to latest_time to ensure that the end time of the
# last task falls __within__ a bucket, and not on the edge
latest_time += 1e-6
# Compute average CPU utilization per time bucket by summing
# cpu-time per bucket
bucket_time_length = (latest_time - earliest_time) / float(num_buckets)
cpu_time = [0 for _ in range(num_buckets)]
for data in tasks.values():
task_start_time = data["get_arguments_start"]
task_end_time = data["store_outputs_end"]
start_bucket = int(
(task_start_time - earliest_time) / bucket_time_length)
end_bucket = int((task_end_time - earliest_time) / bucket_time_length)
# Walk over each time bucket that this task intersects, adding the
# amount of time that the task intersects within each bucket
for bucket_idx in range(start_bucket, end_bucket + 1):
bucket_start_time = (
earliest_time + bucket_idx * bucket_time_length)
bucket_end_time = (
earliest_time + (bucket_idx + 1) * bucket_time_length)
task_start_time_within_bucket = max(task_start_time,
bucket_start_time)
task_end_time_within_bucket = min(task_end_time, bucket_end_time)
task_cpu_time_within_bucket = (
task_end_time_within_bucket - task_start_time_within_bucket)
if bucket_idx > -1 and bucket_idx < num_buckets:
cpu_time[bucket_idx] += task_cpu_time_within_bucket
# Cpu_utilization is the average cpu utilization of the bucket, which
# is just cpu_time divided by bucket_time_length.
cpu_utilization = list(
map(lambda x: x / float(bucket_time_length), cpu_time))
# Generate histogram bucket edges. Subtract out abs_earliest to get
# relative time.
all_edges = [
earliest_time - abs_earliest + i * bucket_time_length
for i in range(num_buckets + 1)
]
# Left edges are all but the rightmost edge, right edges are all but
# the leftmost edge.
left_edges = all_edges[:-1]
right_edges = all_edges[1:]
return left_edges, right_edges, cpu_utilization
def cpu_usage():
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show, helpers
from bokeh.resources import CDN
from bokeh.io import output_notebook, push_notebook
from bokeh.models import ColumnDataSource
output_notebook(resources=CDN)
# Parse the client table to determine how many CPUs are available
num_cpus = ray.global_state.cluster_resources()["CPU"]
# Update the plot based on the sliders
def plot_utilization():
# Create the Bokeh plot
time_series_fig = figure(
title="CPU Utilization",
tools=["save", "hover", "wheel_zoom", "box_zoom", "pan"],
background_fill_color="#FFFFFF",
x_range=[0, 1],
y_range=[0, 1])
# Create the data source that the plot will pull from
time_series_source = ColumnDataSource(data={
'left': [],
'right': [],
'top': []
})
# Plot the rectangles representing the distribution
time_series_fig.quad(
left="left",
right="right",
top="top",
bottom=0,
source=time_series_source,
fill_color="#B3B3B3",
line_color="#033649")
# Label the plot axes
time_series_fig.xaxis.axis_label = "Time in seconds"
time_series_fig.yaxis.axis_label = "Number of CPUs used"
handle = show(
gridplot(
time_series_fig,
ncols=1,
plot_width=500,
plot_height=500,
toolbar_location="below"),
notebook_handle=True)
def update_plot(abs_earliest, abs_latest, abs_num_tasks, tasks):
num_buckets = 100
left, right, top = compute_utilizations(
abs_earliest, abs_latest, abs_num_tasks, tasks, num_buckets)
time_series_source.data = {
"left": left,
"right": right,
"top": top
}
x_range = (max(0, min(left)) if len(left) else 0, max(right)
if len(right) else 1)
y_range = (0, max(top) + 1 if len(top) else 1)
# Define the axis ranges
x_range = helpers._get_range(x_range)
time_series_fig.x_range.start = x_range.start
time_series_fig.x_range.end = x_range.end
y_range = helpers._get_range(y_range)
time_series_fig.y_range.start = y_range.start
time_series_fig.y_range.end = num_cpus
# Push the updated data to the notebook
push_notebook(handle=handle)
get_sliders(update_plot)
plot_utilization()
# Function to create the cluster usage "heat map"
def cluster_usage():
from bokeh.io import show, output_notebook, push_notebook
from bokeh.resources import CDN
from bokeh.plotting import figure
from bokeh.models import (
ColumnDataSource,
HoverTool,
LinearColorMapper,
BasicTicker,
ColorBar,
)
output_notebook(resources=CDN)
# Initial values
source = ColumnDataSource(
data={
"node_ip_address": ['127.0.0.1'],
"time": ['0.5'],
"num_tasks": ['1'],
"length": [1]
})
# Define the color schema
colors = [
"#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce", "#ddb7b1",
"#cc7878", "#933b41", "#550b1d"
]
mapper = LinearColorMapper(palette=colors, low=0, high=2)
TOOLS = "hover, save, xpan, box_zoom, reset, xwheel_zoom"
# Create the plot
p = figure(
title="Cluster Usage",
y_range=list(set(source.data['node_ip_address'])),
x_axis_location="above",
plot_width=900,
plot_height=500,
tools=TOOLS,
toolbar_location='below')
# Format the plot axes
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "10pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi / 3
# Plot rectangles
p.rect(
x="time",
y="node_ip_address",
width="length",
height=1,
source=source,
fill_color={
"field": "num_tasks",
"transform": mapper
},
line_color=None)
# Add legend to the side of the plot
color_bar = ColorBar(
color_mapper=mapper,
major_label_text_font_size="8pt",
ticker=BasicTicker(desired_num_ticks=len(colors)),
label_standoff=6,
border_line_color=None,
location=(0, 0))
p.add_layout(color_bar, "right")
# Define hover tool
p.select_one(HoverTool).tooltips = [("Node IP Address",
"@node_ip_address"),
("Number of tasks running",
"@num_tasks"), ("Time", "@time")]
# Define the axis labels
p.xaxis.axis_label = "Time in seconds"
p.yaxis.axis_label = "Node IP Address"
handle = show(p, notebook_handle=True)
workers = ray.global_state.workers()
# Function to update the heat map
def heat_map_update(abs_earliest, abs_latest, abs_num_tasks, tasks):
if len(tasks) == 0:
return
earliest = time.time()
latest = 0
node_to_tasks = {}
# Determine which task has the earliest start time out of the ones
# passed into the update function
for task_id, data in tasks.items():
if data["score"] > latest:
latest = data["score"]
if data["score"] < earliest:
earliest = data["score"]
worker_id = data["worker_id"]
node_ip = workers[worker_id]["node_ip_address"]
if node_ip not in node_to_tasks:
node_to_tasks[node_ip] = {}
node_to_tasks[node_ip][task_id] = data
nodes = []
times = []
lengths = []
num_tasks = []
for node_ip, task_dict in node_to_tasks.items():
left, right, top = compute_utilizations(
earliest, latest, abs_num_tasks, task_dict, 100, True)
for (l, r, t) in zip(left, right, top):
nodes.append(node_ip)
times.append((l + r) / 2)
lengths.append(r - l)
num_tasks.append(t)
# Set the y range of the plot to be the node IP addresses
p.y_range.factors = list(set(nodes))
mapper.low = min(min(num_tasks), 0)
mapper.high = max(max(num_tasks), 1)
# Update plot with new data based on slider and text box values
source.data = {
"node_ip_address": nodes,
"time": times,
"num_tasks": num_tasks,
"length": lengths
}
push_notebook(handle=handle)
get_sliders(heat_map_update)

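The workhorse behind the cpu_usage and cluster_usage plots above is compute_utilizations, which bins each task's [get_arguments_start, store_outputs_end] interval into fixed-width time buckets and turns the summed CPU-time per bucket into an average utilization. A small self-contained check of that bucketing, with fabricated task timestamps, assuming compute_utilizations from the module above is in scope:

# Two made-up tasks: A runs over [0, 2), B over [1, 3). With three
# one-second buckets the average utilization should be roughly
# [1, 2, 1]: one task alive in [0, 1), both in [1, 2), one in [2, 3).
tasks = {
    "A": {"get_arguments_start": 0.0, "store_outputs_end": 2.0},
    "B": {"get_arguments_start": 1.0, "store_outputs_end": 3.0},
}
left, right, util = compute_utilizations(
    0.0, 3.0, len(tasks), tasks, num_buckets=3, use_abs_times=True)
print([round(u, 2) for u in util])  # -> approximately [1.0, 2.0, 1.0]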

@@ -319,22 +319,6 @@ class Node(object):
redis_client = self.create_redis_client()
redis_client.hmset("webui", {"url": self._webui_url})
def start_ui(self):
"""Start the web UI."""
stdout_file, stderr_file = self.new_log_files("webui")
notebook_name = self._make_inc_temp(
suffix=".ipynb", prefix="ray_ui", directory_name=self._temp_dir)
_, process_info = ray.services.start_ui(
self._redis_address,
notebook_name,
stdout_file=stdout_file,
stderr_file=stderr_file)
assert ray_constants.PROCESS_TYPE_WEB_UI not in self.all_processes
if process_info is not None:
self.all_processes[ray_constants.PROCESS_TYPE_WEB_UI] = [
process_info
]
def start_plasma_store(self):
"""Start the plasma store."""
assert self._plasma_store_socket_name is None


@@ -56,7 +56,7 @@ class RayParams(object):
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_webui: Boolean flag indicating whether to start the web
UI, which is a Jupyter notebook.
UI, which displays the status of the Ray cluster.
logging_level: Logging level, default will be logging.INFO.
logging_format: Logging format, default contains a timestamp,
filename, line number, and message. See ray_constants.py.

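For reference, callers toggled this flag at node-construction time through RayParams. A hedged sketch of such usage; the module path and constructor signature here are assumptions inferred from the docstring above, not re-verified API:

from ray.parameter import RayParams

# Hypothetical: describe a head node that should not start the web UI.
params = RayParams(include_webui=False)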

@@ -10,7 +10,6 @@ import multiprocessing
import os
import random
import resource
import shutil
import socket
import subprocess
import sys
@@ -943,75 +942,6 @@ def start_dashboard(redis_address,
return dashboard_url, process_info
def start_ui(redis_address, notebook_name, stdout_file=None, stderr_file=None):
"""Start a UI process.
Args:
redis_address: The address of the primary Redis shard.
notebook_name: The destination of the notebook file.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
Returns:
A tuple of the web UI url and ProcessInfo for the process that was
started.
"""
port = 8888
while True:
try:
port_test_socket = socket.socket()
port_test_socket.bind(("127.0.0.1", port))
port_test_socket.close()
break
except socket.error:
port += 1
notebook_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "WebUI.ipynb")
# We copy the notebook file so that the original doesn't get modified by
# the user.
shutil.copy(notebook_filepath, notebook_name)
new_notebook_directory = os.path.dirname(notebook_name)
# We generate the token used for authentication ourselves to avoid
# querying the jupyter server.
token = ray.utils.decode(binascii.hexlify(os.urandom(24)))
# The --ip=0.0.0.0 flag is intended to enable connecting to a notebook
# running within a docker container (from the outside).
command = [
"jupyter", "notebook", "--no-browser", "--port={}".format(port),
"--ip=0.0.0.0", "--NotebookApp.iopub_data_rate_limit=10000000000",
"--NotebookApp.open_browser=False",
"--NotebookApp.token={}".format(token)
]
# If the user is root, add the --allow-root flag.
if os.geteuid() == 0:
command.append("--allow-root")
try:
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_WEB_UI,
env_updates={"REDIS_ADDRESS": redis_address},
cwd=new_notebook_directory,
stdout_file=stdout_file,
stderr_file=stderr_file)
except Exception:
logger.warning("Failed to start the UI, you may need to run "
"'pip install jupyter'.")
else:
webui_url = ("http://localhost:{}/notebooks/{}?token={}".format(
port, os.path.basename(notebook_name), token))
print("\n" + "=" * 70)
print("View the web UI at {}".format(webui_url))
print("=" * 70 + "\n")
return webui_url, process_info
return None, None
def check_and_update_resources(num_cpus, num_gpus, resources):
"""Sanity check a resource dictionary and add sensible defaults.

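One idiom in the removed start_ui is worth noting: it finds a free port by attempting to bind and walking upward on failure. The same loop extracted into a standalone helper; the function name is ours, not Ray API:

import socket

def find_free_port(start=8888):
    # Bind to 127.0.0.1 on increasing port numbers until one succeeds;
    # bind() raises socket.error when the port is already taken.
    port = start
    while True:
        try:
            probe = socket.socket()
            probe.bind(("127.0.0.1", port))
            probe.close()
            return port
        except socket.error:
            port += 1

Note the inherent race: another process can claim the port between the probe and the eventual jupyter launch, which is why the original probes immediately before starting the notebook server.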

@@ -1342,7 +1342,7 @@ def init(redis_address=None,
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_webui: Boolean flag indicating whether to start the web
UI, which is a Jupyter notebook.
UI, which displays the status of the Ray cluster.
driver_id: The ID of the driver.
configure_logging: True if the logging configuration should be done here.
Otherwise, the user may want to configure it themselves.

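A hedged usage example of the flag documented above, as a driver script of this era would have called it:

import ray

# Connect and skip starting the web UI for this session.
ray.init(include_webui=False)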

@@ -24,9 +24,8 @@ ray_files = [
"ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
"ray/core/src/plasma/plasma_store_server", "ray/_raylet.so",
"ray/core/src/ray/raylet/raylet_monitor", "ray/core/src/ray/raylet/raylet",
"ray/WebUI.ipynb", "ray/dashboard/dashboard.py",
"ray/dashboard/index.html", "ray/dashboard/res/main.css",
"ray/dashboard/res/main.js"
"ray/dashboard/dashboard.py", "ray/dashboard/index.html",
"ray/dashboard/res/main.css", "ray/dashboard/res/main.js"
]
# These are the directories where automatically generated Python flatbuffer
@@ -38,11 +37,6 @@ generated_python_directories = [
optional_ray_files = []
ray_ui_files = [
"ray/core/src/catapult_files/index.html",
"ray/core/src/catapult_files/trace_viewer_full.html"
]
ray_autoscaler_files = [
"ray/autoscaler/aws/example-full.yaml",
"ray/autoscaler/gcp/example-full.yaml",
@@ -56,13 +50,6 @@ if "RAY_USE_NEW_GCS" in os.environ and os.environ["RAY_USE_NEW_GCS"] == "on":
"ray/core/src/credis/redis/src/redis-server"
]
# The UI files are mandatory if the INCLUDE_UI environment variable equals 1.
# Otherwise, they are optional.
if "INCLUDE_UI" in os.environ and os.environ["INCLUDE_UI"] == "1":
ray_files += ray_ui_files
else:
optional_ray_files += ray_ui_files
optional_ray_files += ray_autoscaler_files
extras = {

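The deleted gating logic spells the check as "INCLUDE_UI" in os.environ and os.environ["INCLUDE_UI"] == "1"; the same decision reads more directly with a dict-style get. A sketch of the equivalent (list contents mirror the diff above):

import os

ray_ui_files = [
    "ray/core/src/catapult_files/index.html",
    "ray/core/src/catapult_files/trace_viewer_full.html",
]
ray_files, optional_ray_files = [], []

if os.environ.get("INCLUDE_UI") == "1":
    ray_files += ray_ui_files           # mandatory: packaging fails if absent
else:
    optional_ray_files += ray_ui_files  # best-effort: included when built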

@@ -1,65 +0,0 @@
#!/bin/bash
set -x
# Cause the script to exit if a single command fails.
set -e
TP_DIR=$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd)/../
CATAPULT_COMMIT=18cd334755701cf0c3b90b7172126c686d2eb787
CATAPULT_HOME=$TP_DIR/pkg/catapult
VULCANIZE_BIN=$CATAPULT_HOME/tracing/bin/vulcanize_trace_viewer
CATAPULT_FILES=$TP_DIR/../build/src/catapult_files
# This is where we will copy the files that need to be packaged with the wheels.
mkdir -p $CATAPULT_FILES
if [[ "$INCLUDE_UI" == "0" ]]; then
# Let installation continue without building the UI.
exit 0
fi
if ! type python2 > /dev/null; then
echo "cannot properly set up UI without a python2 executable"
if [[ "$INCLUDE_UI" == "1" ]]; then
# Since the UI is explicitly supposed to be included, fail here.
exit 1
else
# Let installation continue without building the UI.
exit 0
fi
fi
# Download catapult and use it to autogenerate some static html if it isn't
# already present.
if [[ ! -d $CATAPULT_HOME ]]; then
echo "setting up catapult"
# The git clone command seems to fail in Travis, so retry up to 20 times.
for COUNT in {1..20}; do
# Attempt to git clone catapult and break from the retry loop if it succeeds.
git clone -q https://github.com/ray-project/catapult.git $CATAPULT_HOME && break
# If none of the retries succeeded at cloning catapult, then fail.
if [[ $COUNT == 20 ]]; then
exit 1
fi
done
fi
REBUILD=off
# Check out the appropriate commit from catapult.
pushd $CATAPULT_HOME
if [ "$CATAPULT_COMMIT" != `git rev-parse HEAD` ]; then
git fetch origin master
git checkout $CATAPULT_COMMIT
REBUILD=on
fi
popd
# If the autogenerated catapult files aren't present, then generate them.
if [[ ! -f $CATAPULT_FILES/index.html || "$REBUILD" == "on" ]]; then
python2 $VULCANIZE_BIN --config chrome --output $CATAPULT_FILES/trace_viewer_full.html
cp $CATAPULT_HOME/tracing/bin/index.html $CATAPULT_FILES/index.html
fi


@@ -57,12 +57,6 @@ bash "$TP_SCRIPT_DIR/build_credis.sh"
#RAY_BUILD_JAVA=$RAY_BUILD_JAVA \
#bash "$TP_SCRIPT_DIR/build_arrow.sh" $PYTHON_EXECUTABLE
##############################################
# catapult
##############################################
# Clone catapult and build the static HTML needed for the UI.
bash "$TP_SCRIPT_DIR/build_ui.sh"
##############################################
# rDSN (optional)
##############################################