This reverts commit d4d71985d5.

Parent: d2caa00be8
Commit: e4f4c79252

6 changed files with 29 additions and 45 deletions
@@ -64,7 +64,7 @@ steps:
   commands:
     - *prelude_commands
     - TORCH_VERSION=1.6 ./ci/travis/install-dependencies.sh
-    - bazel test --config=ci --test_env=CI $(./scripts/bazel_export_options) --build_tests_only --test_tag_filters=-flaky,-flaky-mac,-post_wheel_build --
+    - bazel test --config=ci --test_env=CI $(./scripts/bazel_export_options) --build_tests_only --test_tag_filters=-flaky,-flaky-mac --
       //:all python/ray/serve/... python/ray/dashboard/... -rllib/... -core_worker_test
     - *epilogue_commands

@@ -182,9 +182,7 @@
     - TORCH_VERSION=1.6 ./ci/travis/install-dependencies.sh
     - ./dashboard/tests/run_ui_tests.sh
     - bazel test --config=ci $(./scripts/bazel_export_options) python/ray/dashboard/...
-    - bazel test --config=ci $(./scripts/bazel_export_options)
-      --test_tag_filters=-post_wheel_build
-      python/ray/serve/...
+    - bazel test --config=ci $(./scripts/bazel_export_options) python/ray/serve/...

 - label: ":python: Minimal install"
   conditions: ["RAY_CI_PYTHON_AFFECTED"]
@@ -238,31 +238,27 @@ Ray Serve supports serving deployments with different (possibly conflicting)
 Python dependencies. For example, you can simultaneously serve one deployment
 that uses legacy Tensorflow 1 and another that uses Tensorflow 2.

-This is supported on Mac OS and Linux using Ray's :ref:`runtime-environments` feature.
-As with all other Ray actor options, pass the runtime environment in via ``ray_actor_options`` in
-your deployment. Be sure to first run ``pip install "ray[default]"`` to ensure the
-Runtime Environments feature is installed.
+Currently this is supported on Mac OS and Linux using `conda <https://docs.conda.io/en/latest/>`_
+via Ray's built-in ``runtime_env`` option for actors.
+As with all other actor options, pass these in via ``ray_actor_options`` in
+your deployment.
+You must have a conda environment set up for each set of
+dependencies you want to isolate. If using a multi-node cluster, the
+desired conda environment must be present on all nodes. Also, the Python patch version
+(e.g. 3.8.10) must be identical on all nodes (this is a requirement for any Ray cluster).
+See :ref:`runtime-environments` for details.

-Example:
+Here's an example script. For it to work, first create a conda
+environment named ``ray-tf1`` with Ray Serve and Tensorflow 1 installed,
+and another named ``ray-tf2`` with Ray Serve and Tensorflow 2. The Ray and
+Python versions must be the same in both environments.

 .. literalinclude:: ../../../python/ray/serve/examples/doc/conda_env.py

-.. note::
-  When using a Ray library (for example, Ray Serve) in a runtime environment, it must
-  explicitly be included in the dependencies, as in the above example. This is not
-  required when just using Ray Core.
-
-.. tip::
-  Avoid dynamically installing packages that install from source: these can be slow and
-  use up all resources while installing, leading to problems with the Ray cluster. Consider
-  precompiling such packages in a private repository or Docker image.
-
 The dependencies required in the deployment may be different than
 the dependencies installed in the driver program (the one running Serve API
 calls). In this case, you should use a delayed import within the class to avoid
-importing unavailable packages in the driver. This applies even when not
-using runtime environments.
+importing unavailable packages in the driver.

 Example:

 .. literalinclude:: ../../../python/ray/serve/examples/doc/imported_backend.py
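The restored paragraph above refers to a "delayed import within the class" but the referenced imported_backend.py is not part of this diff. The following is only a rough sketch of that pattern under the same Serve API used elsewhere in this commit; the deployment name and class are made up for illustration.

    # Sketch of the "delayed import" pattern (NOT the referenced imported_backend.py):
    # the heavy dependency is imported inside the deployment class, so the driver
    # process that calls .deploy() never executes that import itself.
    import requests

    from ray import serve

    serve.start()


    @serve.deployment(name="delayed_import")
    class VersionReporter:
        def __init__(self):
            # Imported only when a replica is constructed, inside the replica
            # process (which may run in a different conda/runtime environment).
            import tensorflow as tf
            self._version = tf.__version__

        def __call__(self, request):
            return f"Tensorflow {self._version}"


    VersionReporter.deploy()
    print(requests.get("http://127.0.0.1:8000/delayed_import").text)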
@@ -338,11 +338,3 @@ py_test(
     tags = ["exclusive", "team:serve"],
     deps = [":serve_lib"]
 )
-
-py_test(
-    name = "conda_env",
-    size = "medium",
-    srcs = glob(["examples/doc/*.py"]),
-    tags = ["exclusive", "post_wheel_build", "team:serve"],
-    deps = [":serve_lib"]
-)
@@ -1073,9 +1073,8 @@ class BackendState:
                 f"Deployment '{self._name}' has "
                 f"{len(slow_start_replicas)} replicas that have taken "
                 f"more than {SLOW_STARTUP_WARNING_S}s to start up. This "
-                "may be caused by waiting for the cluster to auto-scale, "
-                "waiting for a runtime environment to install, or a slow "
-                "constructor. Resources required "
+                "may be caused by waiting for the cluster to auto-scale "
+                "or because the constructor is slow. Resources required "
                 f"for each replica: {required}, resources available: "
                 f"{available}. component=serve deployment={self._name}")

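Because the change spans several concatenated string literals, it can be hard to see how the user-facing warning actually reads after the revert. Here is an illustrative reconstruction with placeholder values (not output from Ray); the restored wording no longer mentions waiting for a runtime environment to install.

    # Illustrative only: placeholder values showing the restored (post-revert) message.
    name = "my_deployment"
    slow = 2
    SLOW_STARTUP_WARNING_S = 30
    required = {"CPU": 1}
    available = {"CPU": 0}
    print(f"Deployment '{name}' has "
          f"{slow} replicas that have taken "
          f"more than {SLOW_STARTUP_WARNING_S}s to start up. This "
          "may be caused by waiting for the cluster to auto-scale "
          "or because the constructor is slow. Resources required "
          f"for each replica: {required}, resources available: "
          f"{available}. component=serve deployment={name}")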
@@ -1,28 +1,27 @@
 import requests
 from ray import serve
+import tensorflow as tf

 serve.start()


 @serve.deployment
-def requests_version(request):
-    return requests.__version__
+def tf_version(request):
+    return ("Tensorflow " + tf.__version__)


-requests_version.options(
-    name="25",
-    ray_actor_options={
+tf_version.options(
+    name="tf1", ray_actor_options={
         "runtime_env": {
-            "pip": ["ray[serve]", "requests==2.25.1"]
+            "conda": "ray-tf1"
         }
     }).deploy()
-requests_version.options(
-    name="26",
-    ray_actor_options={
+tf_version.options(
+    name="tf2", ray_actor_options={
         "runtime_env": {
-            "pip": ["ray[serve]", "requests==2.26.0"]
+            "conda": "ray-tf2"
         }
     }).deploy()

-assert requests.get("http://127.0.0.1:8000/25").text == "2.25.1"
-assert requests.get("http://127.0.0.1:8000/26").text == "2.26.0"
+print(requests.get("http://127.0.0.1:8000/tf1").text)  # Tensorflow 1.15.0
+print(requests.get("http://127.0.0.1:8000/tf2").text)  # Tensorflow 2.3.0
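The restored example assumes conda environments named ``ray-tf1`` and ``ray-tf2`` already exist on every node, with matching Ray and Python versions. A small, hypothetical pre-flight check (not part of this commit) along these lines can catch a missing environment before the deployments are created:

    # Hypothetical pre-flight check for the restored conda_env.py example:
    # verify the required conda environments are visible on this node.
    import json
    import subprocess

    out = subprocess.run(["conda", "env", "list", "--json"],
                         capture_output=True, text=True, check=True)
    env_names = {path.rsplit("/", 1)[-1] for path in json.loads(out.stdout)["envs"]}
    missing = {"ray-tf1", "ray-tf2"} - env_names
    if missing:
        raise SystemExit(f"Create these conda envs before running the example: {missing}")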