
####################################################################
# All nodes in this cluster will auto-terminate in 1 hour
####################################################################

# A unique identifier for the head node and workers of this cluster.
cluster_name: autoscaler-stress-test

# The minimum number of worker nodes to launch in addition to the head
# node. This number should be >= 0.
min_workers: 100

# The maximum number of worker nodes to launch in addition to the head
# node. This takes precedence over min_workers.
max_workers: 100

# If a node is idle for this many minutes, it will be removed.
idle_timeout_minutes: 5

# Cloud-provider specific configuration.
provider:
    type: aws
    region: us-west-1
    availability_zone: us-west-1a
    cache_stopped_nodes: False

# How Ray will authenticate with newly launched nodes.
auth:
    ssh_user: ubuntu
# By default Ray creates a new private keypair, but you can also use your own.
# If you do so, make sure to also set "KeyName" in the head and worker node
# configurations below.
# ssh_private_key: /path/to/your/key.pem
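# Illustrative only (not part of this stress test): if you bring your own key,
# the relevant pieces would look roughly like the sketch below. "my-key-name"
# and the key path are placeholders, not values used by this config.
#
#   auth:
#       ssh_user: ubuntu
#       ssh_private_key: /path/to/your/key.pem
#   head_node:
#       KeyName: my-key-name
#   worker_nodes:
#       KeyName: my-key-name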

# Provider-specific config for the head node, e.g. instance type. By default
# Ray will auto-configure unspecified fields such as SubnetId and KeyName.
# For more documentation on available fields, see:
# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances
head_node:
    InstanceType: m4.16xlarge
    ImageId: ami-0cc472544ce594a19 # Custom ami

    # Set primary volume to 100 GiB
    BlockDeviceMappings:
        - DeviceName: /dev/sda1
          Ebs:
              VolumeSize: 100

    # Additional options in the boto docs.
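    # Illustrative only (not used by this stress test): any other
    # create_instances field from the boto docs linked above can be added here,
    # e.g. an instance profile. The profile name below is a placeholder.
    # IamInstanceProfile:
    #     Name: my-ray-head-profile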

docker:
    image: "rayproject/ray:latest-gpu" # You can change this to latest-cpu if you don't need GPU support and want a faster startup
    container_name: "ray_container"
    # If true, pulls latest version of image. Otherwise, `docker run` will only pull the image
    # if no cached version is present.
    pull_before_run: True
    run_options: ["--ulimit nofile=1045876"] # Extra options to pass into "docker run"
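    # Illustrative CPU-only alternative (not used here), per the comment on
    # `image` above:
    # image: "rayproject/ray:latest-cpu"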


# Provider-specific config for worker nodes, e.g. instance type. By default
# Ray will auto-configure unspecified fields such as SubnetId and KeyName.
# For more documentation on available fields, see:
# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances
worker_nodes:
    InstanceType: m4.large
    ImageId: ami-0cc472544ce594a19 # Custom ami

    # Set primary volume to 100 GiB
    BlockDeviceMappings:
        - DeviceName: /dev/sda1
          Ebs:
              VolumeSize: 100

    # Run workers on spot by default. Comment this out to use on-demand.
    InstanceMarketOptions:
        MarketType: spot
        # Additional options can be found in the boto docs, e.g.
        #   SpotOptions:
        #       MaxPrice: MAX_HOURLY_PRICE

    # Additional options in the boto docs.

# List of shell commands to run to set up nodes.
setup_commands:
    # Uncomment these if you want to build ray from source.
    # - sudo apt-get -qq update
    # - sudo apt-get install -y build-essential curl unzip
    # # Build Ray.
    # - git clone https://github.com/ray-project/ray || true
    # - ray/ci/travis/install-bazel.sh
    - pip install -U pip
    - pip install terminado
    - pip install boto3==1.4.8 cython==0.29.0
    # - cd ray/python; git checkout master; git pull; pip install -e . --verbose
    - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-2.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl

# Custom commands that will be run on the head node after common setup.
head_setup_commands: []

# Custom commands that will be run on worker nodes after common setup.
worker_setup_commands: []

# Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
    - ray stop
    - ulimit -n 65536; ray start --head --port=6379 --autoscaling-config=~/ray_bootstrap_config.yaml
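    # Note: `ulimit -n 65536` raises the open-file-descriptor limit for the Ray
    # processes, and ~/ray_bootstrap_config.yaml is the copy of this config
    # that `ray up` places on the head node for the autoscaler to read.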

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:
    - ray stop
    - ulimit -n 65536; ray start --address=$RAY_HEAD_IP:6379 --num-gpus=100
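    # Note: m4.large instances have no physical GPUs; --num-gpus=100 appears to
    # be intentional here, advertising 100 synthetic GPU slots per worker so the
    # stress test can exercise GPU-aware scheduling and autoscaling decisions.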