[docker] Default to ray-ml image (#12703)

Author: Ian Rodney, 2020-12-09 11:49:16 -08:00, committed by GitHub
parent 6f3aacd087
commit 19542c5eb0
9 changed files with 35 additions and 27 deletions
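
This commit switches the default autoscaler Docker image in the example cluster YAMLs from rayproject/ray to rayproject/ray-ml, which bundles the ML dependencies on top of the base Ray image. A minimal sketch of the resulting docker section, assuming the latest-gpu tag and the container name used in most of the examples (pull_before_run and run_options shown with their usual example-file defaults):

docker:
    image: "rayproject/ray-ml:latest-gpu"  # ML dependencies included; larger image to pull
    # image: rayproject/ray:latest-gpu     # slimmer base image if ML dependencies aren't needed
    container_name: "ray_container"
    pull_before_run: True                  # re-pull the tag each time the cluster is started
    run_options: []                        # extra options passed to "docker run"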

@@ -19,7 +19,8 @@ upscaling_speed: 1.0
 # and opens all the necessary ports to support the Ray cluster.
 # Empty string means disabled.
 docker:
-    image: "rayproject/ray:latest-gpu" # You can change this to latest-cpu if you don't need GPU support and want a faster startup
+    image: "rayproject/ray-ml:latest-gpu" # You can change this to latest-cpu if you don't need GPU support and want a faster startup
+    # image: rayproject/ray:latest-gpu # use this one if you don't need ML dependencies, it's faster to pull
     container_name: "ray_container"
     # If true, pulls latest version of image. Otherwise, `docker run` will only pull the image
     # if no cached version is present.
@@ -27,10 +28,10 @@ docker:
     run_options: [] # Extra options to pass into "docker run"
     # Example of running a GPU head with CPU workers
-    # head_image: "rayproject/ray:latest-gpu"
+    # head_image: "rayproject/ray-ml:latest-gpu"
     # Allow Ray to automatically detect GPUs
-    # worker_image: "rayproject/ray:latest-cpu"
+    # worker_image: "rayproject/ray-ml:latest-cpu"
     # worker_run_options: []
 # If a node is idle for this many minutes, it will be removed.
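
The commented head_image / worker_image lines above outline the mixed-image pattern; a sketch of what uncommenting them could look like for a GPU head with CPU-only workers (values taken from the comments in this diff, not from a tested configuration):

docker:
    image: "rayproject/ray-ml:latest-gpu"        # used when head_image/worker_image are not set
    container_name: "ray_container"
    head_image: "rayproject/ray-ml:latest-gpu"   # GPU image for the head node
    worker_image: "rayproject/ray-ml:latest-cpu" # lighter CPU image for the workers
    worker_run_options: []                       # extra "docker run" options for workers only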

@@ -19,13 +19,14 @@ upscaling_speed: 1.0
 # and opens all the necessary ports to support the Ray cluster.
 # Empty string means disabled.
 docker:
-    image: "rayproject/ray:latest-gpu"
+    image: "rayproject/ray-ml:latest-gpu"
+    # image: rayproject/ray:latest-gpu # use this one if you don't need ML dependencies, it's faster to pull
     container_name: "ray_nvidia_docker" # e.g. ray_docker
     # # Example of running a GPU head with CPU workers
-    # head_image: "rayproject/ray:latest-gpu"
+    # head_image: "rayproject/ray-ml:latest-gpu"
-    # worker_image: "rayproject/ray:latest"
+    # worker_image: "rayproject/ray-ml:latest"
 # If a node is idle for this many minutes, it will be removed.
 idle_timeout_minutes: 5

@@ -24,7 +24,7 @@ upscaling_speed: 1.0
 # and opens all the necessary ports to support the Ray cluster.
 # Empty string means disabled.
 docker:
-    image: "" # e.g., rayproject/ray:latest
+    image: "" # e.g., rayproject/ray-ml:latest
     container_name: "" # e.g. ray_docker
     # If true, pulls latest version of image. Otherwise, `docker run` will only pull the image
     # if no cached version is present.
@@ -32,9 +32,9 @@ docker:
     run_options: [] # Extra options to pass into "docker run"
     # Example of running a GPU head with CPU workers
-    # head_image: "rayproject/ray:latest-gpu"
+    # head_image: "rayproject/ray-ml:latest-gpu"
-    # worker_image: "rayproject/ray:latest"
+    # worker_image: "rayproject/ray-ml:latest"
 # If a node is idle for this many minutes, it will be removed.
 idle_timeout_minutes: 5

@@ -19,7 +19,8 @@ upscaling_speed: 1.0
 # and opens all the necessary ports to support the Ray cluster.
 # Empty string means disabled.
 docker:
-    image: "rayproject/ray:latest-gpu" # You can change this to latest-cpu if you don't need GPU support and want a faster startup
+    image: "rayproject/ray-ml:latest-gpu" # You can change this to latest-cpu if you don't need GPU support and want a faster startup
+    # image: rayproject/ray:latest-gpu # use this one if you don't need ML dependencies, it's faster to pull
     container_name: "ray_container"
     # If true, pulls latest version of image. Otherwise, `docker run` will only pull the image
     # if no cached version is present.
@@ -27,10 +28,10 @@ docker:
     run_options: [] # Extra options to pass into "docker run"
     # Example of running a GPU head with CPU workers
-    # head_image: "rayproject/ray:latest-gpu"
+    # head_image: "rayproject/ray-ml:latest-gpu"
     # Allow Ray to automatically detect GPUs
-    # worker_image: "rayproject/ray:latest-cpu"
+    # worker_image: "rayproject/ray-ml:latest-cpu"
     # worker_run_options: []
 # If a node is idle for this many minutes, it will be removed.

@@ -19,13 +19,14 @@ upscaling_speed: 1.0
 # and opens all the necessary ports to support the Ray cluster.
 # Empty string means disabled.
 docker:
-    image: "rayproject/ray:latest-gpu"
+    image: "rayproject/ray-ml:latest-gpu"
+    # image: rayproject/ray:latest-gpu # use this one if you don't need ML dependencies, it's faster to pull
     container_name: "ray_nvidia_docker" # e.g. ray_docker
     # # Example of running a GPU head with CPU workers
-    # head_image: "rayproject/ray:latest-gpu"
+    # head_image: "rayproject/ray-ml:latest-gpu"
-    # worker_image: "rayproject/ray:latest"
+    # worker_image: "rayproject/ray-ml:latest"
 # If a node is idle for this many minutes, it will be removed.
 idle_timeout_minutes: 5
@@ -65,7 +66,7 @@ file_mounts: {
 }
 # List of shell commands to run to set up nodes.
-# NOTE: rayproject/ray:latest has ray latest bundled
+# NOTE: rayproject/ray-ml:latest has ray latest bundled
 setup_commands: []
     # - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-1.1.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl
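
The NOTE line above reflects that the ray-ml:latest image already has the latest Ray bundled, so setup_commands stays empty by default; a nightly wheel could still be layered on inside the container if needed. A sketch, reusing the wheel URL from the commented line above:

setup_commands:
    - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-1.1.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl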

@@ -19,7 +19,8 @@ upscaling_speed: 1.0
 # and opens all the necessary ports to support the Ray cluster.
 # Empty string means disabled.
 docker:
-    image: "rayproject/ray:latest-gpu"
+    image: "rayproject/ray-ml:latest-gpu"
+    # image: rayproject/ray:latest-gpu # use this one if you don't need ML dependencies, it's faster to pull
     container_name: "ray_docker"
     # If true, pulls latest version of image. Otherwise, `docker run` will only pull the image
     # if no cached version is present.
@@ -27,9 +28,9 @@ docker:
     run_options: [] # Extra options to pass into "docker run"
     # Example of running a GPU head with CPU workers
-    # head_image: "rayproject/ray:latest-gpu"
+    # head_image: "rayproject/ray-ml:latest-gpu"
-    # worker_image: "rayproject/ray:latest"
+    # worker_image: "rayproject/ray-ml:latest"
 # If a node is idle for this many minutes, it will be removed.
 idle_timeout_minutes: 5

@@ -19,7 +19,8 @@ upscaling_speed: 1.0
 # and opens all the necessary ports to support the Ray cluster.
 # Empty string means disabled.
 docker:
-    image: "rayproject/ray:latest-gpu" # You can change this to latest-cpu if you don't need GPU support and want a faster startup
+    image: "rayproject/ray-ml:latest-gpu" # You can change this to latest-cpu if you don't need GPU support and want a faster startup
+    # image: rayproject/ray:latest-gpu # use this one if you don't need ML dependencies, it's faster to pull
     container_name: "ray_container"
     # If true, pulls latest version of image. Otherwise, `docker run` will only pull the image
     # if no cached version is present.
@@ -27,10 +28,10 @@ docker:
     run_options: [] # Extra options to pass into "docker run"
     # Example of running a GPU head with CPU workers
-    # head_image: "rayproject/ray:latest-gpu"
+    # head_image: "rayproject/ray-ml:latest-gpu"
     # Allow Ray to automatically detect GPUs
-    # worker_image: "rayproject/ray:latest-cpu"
+    # worker_image: "rayproject/ray-ml:latest-cpu"
     # worker_run_options: []
 # If a node is idle for this many minutes, it will be removed.

@@ -19,14 +19,15 @@ upscaling_speed: 1.0
 # and opens all the necessary ports to support the Ray cluster.
 # Empty string means disabled.
 docker:
-    image: "rayproject/ray:latest-gpu"
+    image: "rayproject/ray-ml:latest-gpu"
+    # image: rayproject/ray:latest-gpu # use this one if you don't need ML dependencies, it's faster to pull
     container_name: "ray_nvidia_docker" # e.g. ray_docker
     # # Example of running a GPU head with CPU workers
-    # head_image: "rayproject/ray:latest-gpu"
+    # head_image: "rayproject/ray-ml:latest-gpu"
-    # worker_image: "rayproject/ray:latest"
+    # worker_image: "rayproject/ray-ml:latest"
 # If a node is idle for this many minutes, it will be removed.
 idle_timeout_minutes: 5
@@ -117,7 +118,7 @@ initialization_commands:
        done"
 # List of shell commands to run to set up nodes.
-# NOTE: rayproject/ray:latest has ray latest bundled
+# NOTE: rayproject/ray-ml:latest has ray latest bundled
 setup_commands: []
     # - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-1.1.0.dev0-cp36-cp36m-manylinux2014_x86_64.whl
     # - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-1.1.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl

@@ -25,7 +25,8 @@ idle_timeout_minutes: 5
 # and opens all the necessary ports to support the Ray cluster.
 # Empty string means disabled. Assumes Docker is installed.
 docker:
-    image: "rayproject/ray:latest-gpu" # You can change this to latest-cpu if you don't need GPU support and want a faster startup
+    image: "rayproject/ray-ml:latest-gpu" # You can change this to latest-cpu if you don't need GPU support and want a faster startup
+    # image: rayproject/ray:latest-gpu # use this one if you don't need ML dependencies, it's faster to pull
     container_name: "ray_container"
     # If true, pulls latest version of image. Otherwise, `docker run` will only pull the image
     # if no cached version is present.
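
As the new inline comments point out, the ray-ml images are larger to pull; a cluster that doesn't need the ML dependencies can switch back to the base image by changing only the image line. A sketch, assuming the rest of the docker section is left at the example defaults:

docker:
    image: "rayproject/ray:latest-gpu"  # or rayproject/ray:latest-cpu if GPU support isn't needed
    container_name: "ray_container"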