# A unique identifier for the head node and workers of this cluster.
cluster_name: lm-cluster

# The minimum number of worker nodes to launch in addition to the head
# node. This number should be >= 0.
min_workers: 1

# The maximum number of worker nodes to launch in addition to the head
# node. This takes precedence over min_workers.
max_workers: 2

# The initial number of worker nodes to launch in addition to the head
# node. When the cluster is first brought up (or when it is refreshed with a
# subsequent `ray up`), this number of nodes will be started.
initial_workers: 1

# Whether to autoscale aggressively. If this is enabled, whenever the
# autoscaler would start any new workers, it starts at least enough of them
# to bring the cluster up to initial_workers.
autoscaling_mode: default

# The autoscaler will scale up the cluster to this target fraction of resource
# usage. For example, if a cluster of 10 nodes is 100% busy and
# target_utilization is 0.8, it would resize the cluster to 13. This fraction
# can be decreased to make upscaling more aggressive. It must be less than
# 1.0 for scaling to happen.
target_utilization_fraction: 0.48

# If a node is idle for this many minutes, it will be removed.
idle_timeout_minutes: 5

# Cloud-provider specific configuration.
provider:
    type: aws
    region: us-west-2
    # Availability zone(s), comma-separated, that nodes may be launched in.
    # Nodes are currently spread between zones in a round-robin fashion;
    # this implementation detail should not be relied upon.
    availability_zone: us-west-2a,us-west-2b

# How Ray will authenticate with newly launched nodes.
auth:
    ssh_user: ubuntu
    # By default Ray creates a new private keypair, but you can also use your
    # own. If you do so, make sure to also set "KeyName" in the head and worker
    # node configurations below (see the commented sketch after worker_nodes).
    # ssh_private_key: /path/to/your/key.pem

# Provider-specific config for the head node, e.g. instance type. By default
# Ray will auto-configure unspecified fields such as SubnetId and KeyName.
# For more documentation on available fields, see:
# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances
head_node:
    InstanceType: m5.xlarge
    ImageId: ami-0b294f219d14e6a82  # Deep Learning AMI (Ubuntu) Version 21.0
    SecurityGroupIds:
        - "{{SecurityGroupId}}"
    # You can provision additional disk space with a configuration such as
    # the following.
    BlockDeviceMappings:
        - DeviceName: /dev/sda1
          Ebs:
              VolumeSize: 100
    # Additional options are documented in the boto docs.

# Provider-specific config for worker nodes, e.g. instance type. By default
# Ray will auto-configure unspecified fields such as SubnetId and KeyName.
# For more documentation on available fields, see:
# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances
worker_nodes:
    InstanceType: p3.2xlarge
    ImageId: ami-0b294f219d14e6a82  # Deep Learning AMI (Ubuntu) Version 21.0
    SecurityGroupIds:
        - "{{SecurityGroupId}}"
    # Run workers on spot by default. Comment this out to use on-demand.
    InstanceMarketOptions:
        MarketType: spot
        # Additional options can be found in the boto docs, e.g.
        #   SpotOptions:
        #       MaxPrice: MAX_HOURLY_PRICE
    # Additional options are documented in the boto docs.
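# A minimal sketch (commented out; not part of the original config): if you
# uncomment ssh_private_key in the `auth` section above, pin the matching EC2
# key pair on both node types by adding a `KeyName` field. `KeyName` is a
# standard EC2 create_instances field; "my-ec2-keypair" is a hypothetical
# placeholder name.
#
#   head_node:
#       KeyName: my-ec2-keypair   # must correspond to ssh_private_key
#   worker_nodes:
#       KeyName: my-ec2-keypair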
# List of shell commands to run to set up nodes.
setup_commands:
    # Note: if you're developing Ray, you probably want to create an AMI that
    # has your Ray repo pre-cloned. Then, you can replace the pip installs
    # below with a git checkout (and possibly a recompile).
    - echo 'export PATH="$HOME/anaconda3/envs/pytorch_p36/bin:$PATH"' >> ~/.bashrc;
      source ~/.bashrc;
      pip install -U ray;
      pip install -U fairseq==0.8.0;
    # Clear any stale apt/dpkg locks, then build and install amazon-efs-utils
    # and mount the shared EFS volume at $HOME/efs.
    - sudo kill -9 `sudo lsof /var/lib/dpkg/lock-frontend | awk '{print $2}' | tail -n 1`;
      sudo pkill -9 apt-get;
      sudo pkill -9 dpkg;
      sudo dpkg --configure -a;
      sudo apt-get -y install binutils;
      cd $HOME;
      git clone https://github.com/aws/efs-utils;
      cd $HOME/efs-utils;
      ./build-deb.sh;
      sudo apt-get -y install ./build/amazon-efs-utils*deb;
      cd $HOME;
      mkdir efs;
      sudo mount -t efs {{FileSystemId}}:/ efs;
      sudo chmod 777 efs;

# Custom commands that will be run on the head node after common setup.
head_setup_commands:
    - pip install boto3==1.4.8  # 1.4.8 adds InstanceMarketOptions

# Custom commands that will be run on worker nodes after common setup.
worker_setup_commands: []

# Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
    - ray stop
    - ulimit -n 65536;
      ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:
    - ray stop
    - ulimit -n 65536;
      ray start --address=$RAY_HEAD_IP:6379 --object-manager-port=8076
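# Usage sketch (not part of the original file): these are the standard Ray
# autoscaler CLI commands for a config like this one; the filename
# "lm-cluster.yaml" is an assumed name matching cluster_name above.
#   ray up lm-cluster.yaml      # launch the cluster (or update a running one)
#   ray attach lm-cluster.yaml  # open an SSH session on the head node
#   ray down lm-cluster.yaml    # terminate the cluster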