diff --git a/release/benchmarks/distributed/test_many_actors.py b/release/benchmarks/distributed/test_many_actors.py
index 94c3ee21f..cf6c2fdaa 100644
--- a/release/benchmarks/distributed/test_many_actors.py
+++ b/release/benchmarks/distributed/test_many_actors.py
@@ -5,10 +5,12 @@ import ray._private.test_utils as test_utils
 import time
 import tqdm
 
+is_smoke_test = True
 if "SMOKE_TEST" in os.environ:
     MAX_ACTORS_IN_CLUSTER = 100
 else:
     MAX_ACTORS_IN_CLUSTER = 10000
+    is_smoke_test = False
 
 
 def test_max_actors():
@@ -66,4 +68,12 @@ if "TEST_OUTPUT_JSON" in os.environ:
         "_peak_memory": round(used_gb, 2),
         "_peak_process_memory": usage,
     }
+    if not is_smoke_test:
+        results["perf_metrics"] = [
+            {
+                "perf_metric_name": "actors_per_second",
+                "perf_metric_value": rate,
+                "perf_metric_type": "THROUGHPUT",
+            }
+        ]
     json.dump(results, out_file)
diff --git a/release/benchmarks/distributed/test_many_pgs.py b/release/benchmarks/distributed/test_many_pgs.py
index 268607968..e1ae74fc8 100644
--- a/release/benchmarks/distributed/test_many_pgs.py
+++ b/release/benchmarks/distributed/test_many_pgs.py
@@ -6,10 +6,12 @@ from ray.util.placement_group import placement_group, remove_placement_group
 import time
 import tqdm
 
+is_smoke_test = True
 if "SMOKE_TEST" in os.environ:
     MAX_PLACEMENT_GROUPS = 20
 else:
     MAX_PLACEMENT_GROUPS = 1000
+    is_smoke_test = False
 
 
 def test_many_placement_groups():
@@ -92,4 +94,12 @@ if "TEST_OUTPUT_JSON" in os.environ:
         "_peak_memory": round(used_gb, 2),
         "_peak_process_memory": usage,
     }
+    if not is_smoke_test:
+        results["perf_metrics"] = [
+            {
+                "perf_metric_name": "pgs_per_second",
+                "perf_metric_value": rate,
+                "perf_metric_type": "THROUGHPUT",
+            }
+        ]
     json.dump(results, out_file)
diff --git a/release/benchmarks/object_store/test_object_store.py b/release/benchmarks/object_store/test_object_store.py
index ce938fcfd..8ab5ced19 100644
--- a/release/benchmarks/object_store/test_object_store.py
+++ b/release/benchmarks/object_store/test_object_store.py
@@ -62,4 +62,12 @@ if "TEST_OUTPUT_JSON" in os.environ:
         "num_nodes": NUM_NODES,
         "success": "1",
     }
+    perf_metric_name = f"time_to_broadcast_{OBJECT_SIZE}_bytes_to_{NUM_NODES}_nodes"
+    results["perf_metrics"] = [
+        {
+            "perf_metric_name": perf_metric_name,
+            "perf_metric_value": end - start,
+            "perf_metric_type": "LATENCY",
+        }
+    ]
     json.dump(results, out_file)
diff --git a/release/benchmarks/single_node/test_single_node.py b/release/benchmarks/single_node/test_single_node.py
index 02ed1efbf..dacd292df 100644
--- a/release/benchmarks/single_node/test_single_node.py
+++ b/release/benchmarks/single_node/test_single_node.py
@@ -206,4 +206,31 @@ if "TEST_OUTPUT_JSON" in os.environ:
         "large_object_size": MAX_RAY_GET_SIZE,
         "success": "1",
     }
+    results["perf_metrics"] = [
+        {
+            "perf_metric_name": f"{MAX_ARGS}_args_time",
+            "perf_metric_value": args_time,
+            "perf_metric_type": "LATENCY",
+        },
+        {
+            "perf_metric_name": f"{MAX_RETURNS}_returns_time",
+            "perf_metric_value": returns_time,
+            "perf_metric_type": "LATENCY",
+        },
+        {
+            "perf_metric_name": f"{MAX_RAY_GET_ARGS}_get_time",
+            "perf_metric_value": get_time,
+            "perf_metric_type": "LATENCY",
+        },
+        {
+            "perf_metric_name": f"{MAX_QUEUED_TASKS}_queued_time",
+            "perf_metric_value": queued_time,
+            "perf_metric_type": "LATENCY",
+        },
+        {
+            "perf_metric_name": f"{MAX_RAY_GET_SIZE}_large_object_time",
+            "perf_metric_value": large_object_time,
+            "perf_metric_type": "LATENCY",
+        },
+    ]
     json.dump(results, out_file)
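
For reference, a minimal sketch of the results payload one of these tests would emit after this change, assuming only the keys visible in the diff (the numeric values and the output path are invented for illustration; real runs write to the file named by TEST_OUTPUT_JSON and include additional benchmark-specific fields):

    # Sketch of a test_many_actors.py results file with the new perf_metrics
    # schema (perf_metric_name / perf_metric_value / perf_metric_type).
    # All values below are hypothetical.
    import json

    results = {
        "actors_per_second": 472.5,  # hypothetical measured rate
        "num_actors": 10000,
        "success": "1",
        "perf_metrics": [
            {
                "perf_metric_name": "actors_per_second",
                "perf_metric_value": 472.5,
                "perf_metric_type": "THROUGHPUT",
            }
        ],
    }

    with open("example_output.json", "w") as out_file:  # stand-in path
        json.dump(results, out_file)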