# ray/ci/long_running_distributed_tests/ray-project/cluster.yaml

# This file is generated by `ray project create`.
# A unique identifier for the head node and workers of this cluster.
cluster_name: long-running-distributed-tests
# The minimum number of worker nodes to launch in addition to the head
# node. This number should be >= 0.
min_workers: 3
# The maximum number of worker nodes to launch in addition to the head
# node. This takes precedence over min_workers, which defaults to 0.
max_workers: 3
# The autoscaler will scale up the cluster to this target fraction of resource
# usage. For example, if a cluster of 10 nodes is 100% busy and
# target_utilization is 0.8, it would resize the cluster to 13. This fraction
# can be decreased to increase the aggressiveness of upscaling.
# This value must be less than 1.0 for scaling to happen.
target_utilization_fraction: 0.8
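# (I.e. the target size is busy nodes divided by the fraction: 10 / 0.8 = 12.5,
# rounded up to 13. With min_workers == max_workers == 3 here, the worker
# count is effectively pinned.)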
# If a node is idle for this many minutes, it will be removed.
idle_timeout_minutes: 5
# Cloud-provider specific configuration.
provider:
    type: aws
    region: us-west-2
    availability_zone: us-west-2a
    cache_stopped_nodes: False
# How Ray will authenticate with newly launched nodes.
auth:
    ssh_user: ubuntu
# By default Ray creates a new private keypair, but you can also use your own.
# If you do so, make sure to also set "KeyName" in the head and worker node
# configurations below.
# ssh_private_key: /path/to/your/key.pem
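# As a sketch, using your own key might look like this ("my-keypair" is a
# placeholder name, not part of this config):
#     ssh_private_key: /path/to/your/key.pem
# and then, under head_node and worker_nodes below:
#     KeyName: my-keypair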
# Provider-specific config for the head node, e.g. instance type. By default
# Ray will auto-configure unspecified fields such as SubnetId and KeyName.
# For more documentation on available fields, see:
# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances
head_node:
    InstanceType: g3.8xlarge
    ImageId: ami-0888a3b5189309429  # DLAMI 7/1/19
    BlockDeviceMappings:
        - DeviceName: /dev/sda1
          Ebs:
              VolumeSize: 150
worker_nodes:
    InstanceType: g3.8xlarge
    ImageId: ami-0888a3b5189309429  # DLAMI 7/1/19
    BlockDeviceMappings:
        - DeviceName: /dev/sda1
          Ebs:
              VolumeSize: 150
    InstanceMarketOptions:
        MarketType: spot
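# If you want to cap the spot price, EC2's InstanceMarketOptions also accepts
# a SpotOptions block (see the boto3 docs linked above). A sketch with a
# placeholder price, not part of the original config:
#     InstanceMarketOptions:
#         MarketType: spot
#         SpotOptions:
#             MaxPrice: "2.50"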
setup_commands:
    # Install ray.
    - pip install -U pip
    - ray || pip install -U {{ray-wheel}}
    # Install rllib without -U so we don't replace the existing Ray installation.
    - pip install ray[rllib]
    - pip install -U ipdb
    # There have been some recent problems with torch 1.5 and torchvision 0.6
    # not recognizing GPUs, so we pin torch 1.4 and torchvision 0.5.
    # https://github.com/pytorch/pytorch/issues/37212#issuecomment-623198624
    - pip install torch==1.4.0 torchvision==0.5.0
    - echo set-window-option -g mouse on > ~/.tmux.conf
    - echo 'termcapinfo xterm* ti@:te@' > ~/.screenrc
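    # Optionally, a quick GPU sanity check for the pinned torch build could be
    # appended here (a suggestion, not part of the original setup):
    #     - python -c 'import torch; print(torch.cuda.is_available())'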
# Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
    - ray stop
    - export RAY_BACKEND_LOG_LEVEL=debug
    - ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml
# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:
    - ray stop
    - export RAY_BACKEND_LOG_LEVEL=debug
    - ray start --address=$RAY_HEAD_IP:6379 --object-manager-port=8076
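
# Typical workflow with this config (standard Ray autoscaler CLI, run from
# your local machine):
#     ray up cluster.yaml       # launch the head node and workers
#     ray attach cluster.yaml   # open a shell on the head node
#     ray down cluster.yaml     # tear the cluster down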