Revert "Revert "Bump pytest from 5.4.3 to 7.0.1"" (#26525)

* Revert "Revert "Bump pytest from 5.4.3 to 7.0.1""

This reverts commit ab10890e90.

Signed-off-by: Riatre Foo <foo@riat.re>

* Fix missing test data files dependency in rllib/BUILD

See #26334 and #26517 for context.

Once this is in, it should be good to roll forward again; see the BUILD sketch below for the general shape of the fix.

Signed-off-by: Riatre Foo <foo@riat.re>

* debug: run all tests

Signed-off-by: Riatre Foo <foo@riat.re>

* Revert "debug: run all tests"

This reverts commit 0c5e796b0eb437d64922f66749c61b0412486970.

Signed-off-by: Riatre Foo <foo@riat.re>

* fix new tests since last rebase

Signed-off-by: Riatre Foo <foo@riat.re>
Riatre authored 2022-07-19 12:21:19 +08:00; committed by GitHub
parent a435a04ead
commit 591cd22be7
24 changed files with 232 additions and 110 deletions
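
Most of the BUILD changes below follow the same shape as the rllib fix described in the commit message: any file a test reads at runtime has to be declared as a Bazel data (or srcs/deps) input, otherwise it never reaches the test's runfiles tree. A minimal sketch of that shape, with hypothetical target and file names that are not taken from this diff:

# Sketch only -- "test_reads_fixture" and the data path are illustrative names.
py_test(
    name = "test_reads_fixture",
    size = "small",
    srcs = ["tests/test_reads_fixture.py"],
    # Declares the runtime input so Bazel stages it next to the test.
    data = ["tests/data/sample_fixture.json"],
    # Shared pytest fixtures come in through the conftest library.
    deps = ["//:ray_lib", ":conftest"],
    tags = ["exclusive", "team:core"],
)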

@ -3,7 +3,7 @@ load("@bazel_skylib//lib:paths.bzl", "paths")
# py_test_module_list creates a py_test target for each
# Python file in `files`
def py_test_module_list(files, size, deps, extra_srcs, name_suffix="", **kwargs):
def py_test_module_list(files, size, deps, extra_srcs=[], name_suffix="", **kwargs):
for file in files:
# remove .py
name = paths.split_extension(file)[0] + name_suffix
@ -14,6 +14,7 @@ def py_test_module_list(files, size, deps, extra_srcs, name_suffix="", **kwargs)
size = size,
main = file,
srcs = extra_srcs + [file],
deps = deps,
**kwargs
)
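
The macro change above gives extra_srcs a default of [] and prepends it to each generated target's srcs, so existing call sites keep working and new ones can omit the argument. A hedged sketch of a call site (the glob pattern and deps are illustrative, not copied from this commit):

py_test_module_list(
    files = glob(["tests/test_*.py"]),
    size = "medium",
    extra_srcs = ["tests/conftest.py"],  # now optional; defaults to []
    deps = ["//:ray_lib"],
    tags = ["exclusive", "team:core"],  # forwarded through **kwargs
)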

@ -76,7 +76,7 @@ if [[ "$platform" == "linux" ]]; then
"$PYTHON_EXE" -u -c "import ray; print(ray.__commit__)" | grep "$TRAVIS_COMMIT" || (echo "ray.__commit__ not set properly!" && exit 1)
# Install the dependencies to run the tests.
"$PIP_CMD" install -q aiohttp aiosignal frozenlist grpcio pytest==5.4.3 requests proxy.py
"$PIP_CMD" install -q aiohttp aiosignal frozenlist grpcio 'pytest==7.0.1' requests proxy.py
# Run a simple test script to make sure that the wheel works.
for SCRIPT in "${TEST_SCRIPTS[@]}"; do
@ -117,7 +117,7 @@ elif [[ "$platform" == "macosx" ]]; then
"$PIP_CMD" install -q "$PYTHON_WHEEL"
# Install the dependencies to run the tests.
"$PIP_CMD" install -q aiohttp aiosignal frozenlist grpcio pytest==5.4.3 requests proxy.py
"$PIP_CMD" install -q aiohttp aiosignal frozenlist grpcio 'pytest==7.0.1' requests proxy.py
# Run a simple test script to make sure that the wheel works.
for SCRIPT in "${TEST_SCRIPTS[@]}"; do

@ -33,11 +33,11 @@ eval "${WORKSPACE_DIR}/ci/ci.sh build"
# Install test requirements
python -m pip install -U \
pytest==5.4.3 \
pytest==7.0.1 \
numpy
# Train requirements.
# TODO: make this dynamic
if [ "${TRAIN_MINIMAL_INSTALL-}" = 1 ]; then
python -m pip install -U "ray[tune]"
fi
fi

@ -324,7 +324,5 @@ py_test_module_list(
"small_size_python_tests",
"team:core",
],
deps = [
":ray_api",
],
deps = [],
)

@ -10,6 +10,12 @@ py_library(
),
)
py_library(
name = "conftest",
srcs = ["tests/conftest.py"],
deps = ["//python/ray/tests:conftest"],
)
py_test_run_all_subdirectory(
size = "medium",
include = ["**/test*.py"],
@ -20,6 +26,15 @@ py_test_run_all_subdirectory(
"tests/test_state_head.py"
],
extra_srcs = [],
data = [
"modules/job/tests/backwards_compatibility_scripts/test_backwards_compatibility.sh",
"modules/job/tests/pip_install_test-0.5-py3-none-any.whl",
"modules/snapshot/snapshot_schema.json",
"modules/tests/test_config_files/basic_runtime_env.yaml",
] + glob([
"modules/job/tests/subprocess_driver_scripts/*.py",
]),
deps = [":conftest"],
tags = ["exclusive", "team:serve"],
)
@ -27,6 +42,7 @@ py_test(
name = "test_node",
size = "medium",
srcs = ["modules/node/tests/test_node.py"],
deps = [":conftest"],
tags = ["exclusive", "team:serve"],
)
@ -34,6 +50,7 @@ py_test(
name = "test_dashboard",
size = "medium",
srcs = ["tests/test_dashboard.py"],
deps = [":conftest"],
tags = ["exclusive", "team:serve"],
)
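
The :conftest py_library added here (and repeated in several BUILD files below) appears to exist so that conftest.py is staged into each test's runfiles; pytest only picks up fixtures from conftest.py files it can actually see inside the sandbox. A compact sketch of the pattern, with the rationale spelled out in comments (the comments are my reading, not text from the commit):

py_library(
    name = "conftest",
    srcs = ["tests/conftest.py"],
    # Also pulls in the shared fixtures from python/ray/tests.
    deps = ["//python/ray/tests:conftest"],
)

py_test(
    name = "test_dashboard",
    size = "medium",
    srcs = ["tests/test_dashboard.py"],
    # Without this dep, conftest.py is absent from the sandbox and fixtures are not found.
    deps = [":conftest"],
    tags = ["exclusive", "team:serve"],
)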

@ -0,0 +1,5 @@
filegroup(
name = "example",
data = glob(["example-*.yaml"]),
visibility = ["//python/ray/tests:__pkg__"],
)

@ -0,0 +1,5 @@
filegroup(
name = "example",
data = glob(["example-*.yaml"]),
visibility = ["//python/ray/tests:__pkg__"],
)

@ -0,0 +1,5 @@
filegroup(
name = "example",
data = glob(["example-*.yaml"]),
visibility = ["//python/ray/tests:__pkg__"],
)

@ -0,0 +1,5 @@
filegroup(
name = "example",
data = glob(["example-*.yaml"]),
visibility = ["//python/ray/tests:__pkg__"],
)
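
These four identical filegroups presumably live in the aws, azure, gcp, and local autoscaler example packages, matching the //python/ray/autoscaler/*:example labels consumed further down in this diff; the visibility line is what lets //python/ray/tests reference them. A consumption sketch, trimmed from the test_autoscaler_yaml target that appears later:

py_test(
    name = "test_autoscaler_yaml",
    size = "small",
    srcs = ["test_autoscaler_yaml.py"],
    data = [
        "//python/ray/autoscaler/aws:example",
        "//python/ray/autoscaler/azure:example",
        "//python/ray/autoscaler/gcp:example",
        "//python/ray/autoscaler/local:example",
    ],
    deps = ["//:ray_lib", ":conftest"],
)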

@ -5,28 +5,38 @@
# --------------------------------------------------------------------
load("//bazel:python.bzl", "py_test_module_list")
SRCS = [] + select({
"@bazel_tools//src/conditions:windows": glob([
"**/conftest.py",
]),
"//conditions:default": [],
})
py_library(
name = "conftest",
srcs = ["tests/conftest.py"],
deps = ["//python/ray/tests:conftest"],
)
py_test(
name = "test_preprocessors",
size = "small",
srcs = ["tests/test_preprocessors.py"],
tags = ["team:ml", "exclusive", "ray_air"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_dataset_formats",
size = "large",
srcs = ["tests/test_dataset_formats.py"],
data = glob(["tests/image-folder/**/*"]),
tags = ["team:core", "exclusive"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
files = glob(
include=["tests/test_*.py"],
exclude=["tests/test_preprocessors.py"]
exclude=[
"tests/test_preprocessors.py",
"tests/test_dataset_formats.py",
],
),
size = "large",
extra_srcs = SRCS,
tags = ["team:core", "exclusive"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)

@ -0,0 +1,9 @@
filegroup(
name = "example_pkg",
data = [
"ray_pkg.yaml",
] + glob([
"my_pkg/**/*.py",
]),
visibility = ["//python/ray/tests:__pkg__"],
)

@ -12,6 +12,11 @@ py_library(
serve_tests_srcs = glob(["tests/**/*.py"])
filegroup(
name = "test_config_files",
data = glob(["tests/test_config_files/**/*"]),
)
py_test(
name = "test_api",
size = "medium",
@ -321,6 +326,7 @@ py_test(
srcs = serve_tests_srcs,
tags = ["exclusive", "team:serve"],
deps = [":serve_lib"],
data = [":test_config_files"],
)
py_test(

@ -1,23 +1,31 @@
load("//bazel:python.bzl", "py_test_module_list")
SRCS = [] + select({
"@bazel_tools//src/conditions:windows": glob([
# TODO(mehrdadn): This should be added for all platforms once resulting errors are fixed
"**/conftest.py",
]),
"//conditions:default": [],
})
py_library(
name = "conftest",
srcs = glob(["**/conftest.py"]),
visibility = [
"//python/ray/tests:__subpackages__",
"//python/ray/dashboard:__pkg__",
"//python/ray/data:__pkg__",
],
)
py_test_module_list(
files = [
"test_dashboard.py",
"test_ray_cluster_with_external_redis.py",
"test_k8s_cluster_launcher.py",
],
size = "large",
extra_srcs = SRCS,
tags = ["exclusive", "manual", "team:serverless"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_k8s_cluster_launcher",
srcs = ["test_k8s_cluster_launcher.py"],
tags = ["exclusive", "manual", "team:serverless"],
deps = ["//:ray_lib", ":conftest"],
data = ["test_cli_patterns/test_k8s_cluster_launcher.yaml"]
)
py_test_module_list(
@ -55,7 +63,6 @@ py_test_module_list(
"test_grpc_client_credentials.py",
"test_iter.py",
"test_job.py",
"test_joblib.py",
"test_get_locations.py",
"test_global_state.py",
"test_healthcheck.py",
@ -64,9 +71,17 @@ py_test_module_list(
"test_protobuf_compatibility.py"
],
size = "medium",
extra_srcs = SRCS,
tags = ["exclusive", "medium_size_python_tests_a_to_j", "team:core"],
deps = ["//:ray_lib"],
tags = ["exclusive", "client_tests", "team:serverless"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_joblib",
srcs = ["test_joblib.py"],
data = ["mnist_784_100_samples.pkl"],
size = "medium",
tags = ["exclusive", "client_tests", "team:serverless"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
@ -75,9 +90,8 @@ py_test_module_list(
"test_client_reconnect.py",
],
size = "large",
extra_srcs = SRCS,
tags = ["exclusive", "client_tests", "team:serverless"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
@ -92,9 +106,8 @@ py_test_module_list(
"test_client_library_integration.py",
],
size = "medium",
extra_srcs = SRCS,
tags = ["exclusive", "client_tests", "team:serverless"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
@ -144,9 +157,8 @@ py_test_module_list(
"test_traceback.py",
],
size = "medium",
extra_srcs = SRCS,
tags = ["exclusive", "medium_size_python_tests_k_to_z", "team:core"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
@ -179,9 +191,8 @@ py_test_module_list(
"test_get_or_create_actor.py",
],
size = "small",
extra_srcs = SRCS,
tags = ["exclusive", "small_size_python_tests", "team:core"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
@ -189,9 +200,8 @@ py_test_module_list(
"test_gcs_ha_e2e.py",
],
size = "small",
extra_srcs = SRCS,
tags = ["exclusive", "ray_ha", "team:core"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
@ -200,7 +210,6 @@ py_test_module_list(
"test_autoscaler.py",
"test_autoscaler_drain_node_api.py",
"test_autoscaler_gcp.py",
"test_autoscaler_yaml.py",
"test_cli_logger.py",
"test_client_metadata.py",
"test_client_terminate.py",
@ -212,9 +221,26 @@ py_test_module_list(
"kuberay/test_autoscaling_config.py"
],
size = "small",
extra_srcs = SRCS,
tags = ["exclusive", "small_size_python_tests", "team:serverless"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_autoscaler_yaml",
size = "small",
srcs = ["test_autoscaler_yaml.py"],
tags = ["exclusive", "small_size_python_tests", "team:serverless"],
deps = ["//:ray_lib", ":conftest"],
data = [
"additional_property.yaml",
"test_cli_patterns/test_multi_node.yaml",
"test_cli_patterns/test_no_head.yaml",
"test_cli_patterns/test_no_workers.yaml",
"//python/ray/autoscaler/aws:example",
"//python/ray/autoscaler/azure:example",
"//python/ray/autoscaler/gcp:example",
"//python/ray/autoscaler/local:example",
],
)
py_test_module_list(
@ -222,15 +248,13 @@ py_test_module_list(
"test_dataclient_disconnect.py",
],
size = "medium",
extra_srcs = SRCS,
tags = ["exclusive", "medium_size_python_tests_a_to_j", "team:serverless"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
files = [
"test_actor.py",
"test_cli.py",
"test_failure.py",
"test_actor_advanced.py",
"test_threaded_actor.py",
@ -244,9 +268,17 @@ py_test_module_list(
"test_exit_observability.py",
],
size = "large",
extra_srcs = SRCS,
tags = ["exclusive", "large_size_python_tests_shard_0", "team:core"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_cli",
srcs = ["test_cli.py"],
data = glob(["test_cli_patterns/*.txt", "test_cli_patterns/*.yaml"]),
size = "large",
tags = ["exclusive", "large_size_python_tests_shard_0", "team:core"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
@ -265,9 +297,8 @@ py_test_module_list(
"test_multi_node_3.py",
],
size = "large",
extra_srcs = SRCS,
tags = ["exclusive", "large_size_python_tests_shard_1", "team:core"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
@ -280,9 +311,9 @@ py_test_module_list(
"test_runtime_env_working_dir_remote_uri.py"
],
size = "large",
extra_srcs = SRCS,
tags = ["exclusive", "large_size_python_tests_shard_2", "team:serve"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
data = ["pip_install_test-0.5-py3-none-any.whl"],
)
py_test_module_list(
@ -292,20 +323,27 @@ py_test_module_list(
"test_runtime_env_conda_and_pip_3.py",
"test_runtime_env_conda_and_pip_4.py",
"test_runtime_env_conda_and_pip_5.py",
"test_runtime_env_complicated.py"
],
size = "large",
extra_srcs = SRCS,
tags = ["exclusive", "post_wheel_build", "team:serve"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_runtime_env_complicated",
size = "large",
srcs = ["test_runtime_env_complicated.py"],
tags = ["exclusive", "post_wheel_build", "team:serve"],
deps = ["//:ray_lib", ":conftest"],
data = ["//python/ray/experimental/packaging/example_pkg"],
)
py_test(
name = "test_actor_group",
size = "medium",
srcs = SRCS + ["test_actor_group.py"],
srcs = ["test_actor_group.py"],
tags = ["exclusive", "medium_size_python_tests_a_to_j", "team:serve"],
deps = ["//:ray_lib"]
deps = ["//:ray_lib", ":conftest"]
)
# TODO(barakmich): aws/ might want its own buildfile, or
@ -313,25 +351,25 @@ py_test(
py_test(
name = "test_autoscaler_aws",
size = "small",
srcs = SRCS + ["aws/test_autoscaler_aws.py"],
srcs = ["aws/test_autoscaler_aws.py"],
tags = ["exclusive", "small_size_python_tests", "team:serverless"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_aws_batch_tag_update",
size = "small",
srcs = SRCS + ["aws/test_aws_batch_tag_update.py"],
srcs = ["aws/test_aws_batch_tag_update.py"],
tags = ["exclusive", "small_size_python_tests", "team:serverless"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_gcp_node_provider",
size = "small",
srcs = SRCS + ["gcp/test_gcp_node_provider.py"],
srcs = ["gcp/test_gcp_node_provider.py"],
tags = ["exclusive", "small_size_python_tests", "team:serverless"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
# Note(simon): typing tests are not included in module list
@ -339,59 +377,63 @@ py_test(
py_test(
name = "test_typing",
size = "medium",
srcs = SRCS + ["test_typing.py", "typing_files/check_typing_bad.py",
"typing_files/check_typing_good.py"],
srcs = ["test_typing.py", "typing_files/check_typing_bad.py",
"typing_files/check_typing_good.py"],
tags = ["exclusive", "small_size_python_tests", "team:core"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
# TODO: use py_test(env = ...) in the build file with bazel 4.0
py_test(
name = "test_tracing",
size = "medium",
srcs = SRCS + ["test_tracing.py"],
srcs = ["test_tracing.py"],
tags = ["exclusive", "medium_size_python_tests_k_to_z", "team:serve"],
deps = ["//:ray_lib"]
deps = ["//:ray_lib", ":conftest"]
)
py_test(
name = "test_pydantic_serialization",
size = "small",
srcs = SRCS + ["test_pydantic_serialization.py", "pydantic_module.py"],
srcs = ["test_pydantic_serialization.py", "pydantic_module.py"],
tags = ["exclusive", "small_size_python_tests", "team:serve"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_runtime_env_validation",
size = "small",
srcs = SRCS + ["test_runtime_env_validation.py"],
srcs = ["test_runtime_env_validation.py"],
data = [
"test_runtime_env_validation_1_schema.json",
"test_runtime_env_validation_2_schema.json",
],
tags = ["exclusive", "small_size_python_tests", "team:serve"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_runtime_env_ray_minimal",
size = "medium",
srcs = SRCS + ["test_runtime_env_ray_minimal.py"],
srcs = ["test_runtime_env_ray_minimal.py"],
tags = ["exclusive", "medium_size_python_tests_k_to_z", "team:serve"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "test_serve_ray_minimal",
size = "small",
srcs = SRCS + ["test_serve_ray_minimal.py"],
srcs = ["test_serve_ray_minimal.py"],
tags = ["exclusive", "small_size_python_tests", "team:serve"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test(
name = "kuberay/test_autoscaling_e2e",
size = "large",
srcs = SRCS + ["kuberay/test_autoscaling_e2e.py"],
srcs = ["kuberay/test_autoscaling_e2e.py"],
tags = ["exclusive", "kuberay_operator", "team:serverless"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
# TODO(ekl) we can't currently support tagging these as flaky since there's
@ -413,11 +455,10 @@ py_test_module_list(
"test_list_actors_4.py",
],
size = "large",
extra_srcs = SRCS,
name_suffix = "_client_mode",
env = {"RAY_CLIENT_MODE": "1", "RAY_PROFILING": "1"},
tags = ["exclusive", "client_tests", "team:serverless"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
@ -425,7 +466,6 @@ py_test_module_list(
"test_actor_in_container.py",
],
size = "large",
extra_srcs = SRCS,
tags = ["exclusive", "worker-container", "team:serve"],
# Now we run this test in a container which has installed ray
deps = [],
@ -441,10 +481,9 @@ py_test_module_list(
"test_array.py"
],
size = "large",
extra_srcs = SRCS,
name_suffix = "_debug_mode",
tags = ["exclusive", "debug_tests", "team:core"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)
py_test_module_list(
@ -457,8 +496,7 @@ py_test_module_list(
"test_array.py"
],
size = "large",
extra_srcs = SRCS,
name_suffix = "_asan",
tags = ["exclusive", "asan_tests", "team:core"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", ":conftest"],
)

@ -2,6 +2,6 @@ py_test(
name = "test_modin",
size = "large",
srcs = ["test_modin.py"],
deps = ["//:ray_lib"],
deps = ["//:ray_lib", "//python/ray/tests:conftest"],
tags = ["team:core", "exclusive"],
)

@ -93,7 +93,7 @@ class AutoscalingConfigTest(unittest.TestCase):
def testValidateDefaultConfig(self):
for config_path in CONFIG_PATHS:
try:
if "aws/example-multi-node-type.yaml" in config_path:
if os.path.join("aws", "example-multi-node-type.yaml") in config_path:
# aws tested in testValidateDefaultConfigAWSMultiNodeTypes.
continue
if "local" in config_path:

@ -19,6 +19,7 @@ Note: config cache does not work with AWS mocks since the AWS resource ids are
"""
import glob
import multiprocessing as mp
import multiprocessing.connection
import os
import re
import sys
@ -115,7 +116,9 @@ def _unlink_test_ssh_key():
pass
def _start_ray_and_block(runner, child_conn: mp.connection.Connection, as_head: bool):
def _start_ray_and_block(
runner, child_conn: multiprocessing.connection.Connection, as_head: bool
):
"""Utility function to start a CLI command with `ray start --block`
This function is expected to be run in another process, where `child_conn` is used

@ -320,7 +320,10 @@ RuntimeError: Failed to unpickle serialized exception"""
try:
ray.get(f.remote())
except Exception as ex:
assert clean_noqa(expected_output) == scrub_traceback(str(ex))
python310_extra_exc_msg = "test_unpickleable_stacktrace.<locals>.NoPickleError."
assert clean_noqa(expected_output) == scrub_traceback(str(ex)).replace(
f"TypeError: {python310_extra_exc_msg}", "TypeError: "
)
def test_serialization_error_message(shutdown_only):

@ -77,9 +77,12 @@ def get_span_list():
def get_span_dict(span_list):
"""Given a list of span names, return dictionary of span names."""
strip_prefix = "python.ray.tests."
span_names = {}
for span in span_list:
span_name = span["name"]
if span_name.startswith(strip_prefix):
span_name = span_name[len(strip_prefix) :]
if span_name in span_names:
span_names[span_name] += 1
else:
@ -103,7 +106,7 @@ def task_helper():
# The spans could show up in a different order, so just check that
# all spans are as expected
span_names = get_span_dict(span_list)
return span_names == {
assert span_names == {
"test_tracing.f ray.remote": 1,
"test_tracing.f ray.remote_worker": 1,
}
@ -171,11 +174,11 @@ def async_actor_helper():
def test_tracing_task_init_workflow(cleanup_dirs, ray_start_init_tracing):
assert task_helper()
task_helper()
def test_tracing_task_start_workflow(cleanup_dirs, ray_start_cli_tracing):
assert task_helper()
task_helper()
def test_tracing_sync_actor_init_workflow(cleanup_dirs, ray_start_init_tracing):

@ -68,6 +68,7 @@ py_test(
name = "test_cluster_searcher",
size = "large",
srcs = ["tests/test_cluster_searcher.py"],
data = ["tests/_test_cluster_interrupt_searcher.py"],
deps = [":tune_lib"],
tags = ["team:ml", "exclusive", "tests_dir_C"],
)

@ -5,13 +5,7 @@
# --------------------------------------------------------------------
load("//bazel:python.bzl", "py_test_module_list")
SRCS = [] + select({
"@bazel_tools//src/conditions:windows": glob([
# TODO(mehrdadn): This should be added for all platforms once resulting errors are fixed
"**/conftest.py",
]),
"//conditions:default": [],
})
SRCS = glob(["**/conftest.py"])
LARGE_TESTS = ["tests/test_recovery.py", "tests/test_basic_workflows_2.py", "tests/test_metadata.py"]

@ -304,7 +304,8 @@ def test_get_named_step_default(workflow_start_regular, tmp_path):
assert math.factorial(5) == workflow.run(factorial.bind(5), workflow_id="factorial")
for i in range(5):
step_name = (
"test_basic_workflows_2.test_get_named_step_default.locals.factorial"
"python.ray.workflow.tests.test_basic_workflows_2."
"test_get_named_step_default.locals.factorial"
)
if i != 0:
step_name += "_" + str(i)

@ -78,7 +78,7 @@ pexpect
Pillow; platform_system != "Windows"
pygments
pyspark==3.1.2
pytest==5.4.3
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-rerunfailures
pytest-sugar

@ -207,7 +207,8 @@ py_test(
name = "test_config",
tags = ["team:ci", "release_unit"],
size = "small",
srcs = ["ray_release/tests/test_config.py"]
srcs = ["ray_release/tests/test_config.py"],
data = ["release_tests.yaml"],
)
py_test(
@ -228,12 +229,18 @@ py_test(
name = "test_run_script",
tags = ["team:ci", "release_unit"],
size = "small",
srcs = ["ray_release/tests/test_run_script.py"]
srcs = ["ray_release/tests/test_run_script.py"],
data = [
"run_release_test.sh",
"ray_release/tests/_test_catch_args.py",
"ray_release/tests/_test_run_release_test_sh.py",
],
)
py_test(
name = "test_wheels",
tags = ["team:ci", "release_unit"],
size = "small",
srcs = ["ray_release/tests/test_wheels.py"]
srcs = ["ray_release/tests/test_wheels.py"],
deps = ["//:ray_lib"],
)

@ -731,7 +731,8 @@ py_test(
name = "test_algorithm",
tags = ["team:rllib", "algorithms_dir", "algorithms_dir_generic"],
size = "large",
srcs = ["algorithms/tests/test_algorithm.py"]
srcs = ["algorithms/tests/test_algorithm.py"],
data = ["tests/data/cartpole/small.json"],
)
py_test(
@ -845,6 +846,7 @@ py_test(
name = "test_cql",
tags = ["team:rllib", "algorithms_dir"],
size = "large",
data = ["tests/data/pendulum/small.json"],
srcs = ["algorithms/cql/tests/test_cql.py"]
)
@ -853,7 +855,8 @@ py_test(
name = "test_crr",
tags = ["team:rllib", "algorithms_dir"],
size = "medium",
srcs = ["algorithms/crr/tests/test_crr.py"]
srcs = ["algorithms/crr/tests/test_crr.py"],
data = ["tests/data/pendulum/large.json"],
)
# DDPG
@ -916,7 +919,11 @@ py_test(
tags = ["team:rllib", "algorithms_dir"],
size = "large",
# Include the json data file.
data = ["tests/data/cartpole/large.json"],
data = [
"tests/data/cartpole/large.json",
"tests/data/pendulum/large.json",
"tests/data/cartpole/small.json",
],
srcs = ["algorithms/marwil/tests/test_marwil.py"]
)
@ -1671,7 +1678,11 @@ py_test(
name = "test_dataset_reader",
tags = ["team:rllib", "offline"],
size = "medium",
srcs = ["offline/tests/test_dataset_reader.py"]
srcs = ["offline/tests/test_dataset_reader.py"],
data = [
"tests/data/pendulum/large.json",
"tests/data/pendulum/enormous.zip",
],
)
py_test(