# ray/release/long_running_distributed_tests/cluster.yaml
cluster_name: long-running-distributed-tests

# Fixed-size cluster: three workers, no autoscaling headroom.
min_workers: 3
max_workers: 3
target_utilization_fraction: 0.8
idle_timeout_minutes: 15

# Run Ray inside the Anyscale ML GPU image on every node.
docker:
  image: anyscale/ray-ml:latest-gpu
  container_name: ray_container
  pull_before_run: true

provider:
  type: aws
  region: us-west-2
  availability_zone: us-west-2a
  # Terminate (don't stop) nodes on teardown so no stale state is reused.
  cache_stopped_nodes: false

auth:
  ssh_user: ubuntu

head_node:
  InstanceType: g3.8xlarge

worker_nodes:
  InstanceType: g3.8xlarge
  # Workers run on spot instances to cut cost for long-running tests.
  InstanceMarketOptions:
    MarketType: spot

setup_commands:
  # cuDNN 7 runtime matching CUDA 10.1 in the base image.
  - apt-get install -y libglib2.0-0 libcudnn7=7.6.5.32-1+cuda10.1
  # Install the latest nightly Ray wheel (py37, manylinux).
  - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-1.2.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl

# Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
  - ray stop
  - export RAY_BACKEND_LOG_LEVEL=debug
  - ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:
  - ray stop
  - export RAY_BACKEND_LOG_LEVEL=debug
  - ray start --address=$RAY_HEAD_IP:6379 --object-manager-port=8076