mirror of
https://github.com/vale981/ray
synced 2025-03-08 19:41:38 -05:00

Between versions 1 and 2 of [this](https://console.anyscale-staging.com/o/anyscale-internal/configurations/app-config-versions/apt_TsCpJCRjMJDpNFhNgJmyCniS) cluster_env, version 1 fails and version 2 succeeds. By the way, we really should start to think about a systematic approach towards our Python dependency story: between client and server — but more importantly server side, including any conflicts among requirements — and how the pip freeze results are evolving over time.
20 lines
735 B
YAML
# Anyscale cluster environment (app config) for the ray-lightning release test.
# NOTE: this file is a Jinja template — the {{ env[...] }} expressions are
# substituted before the result is parsed as YAML, so they stay unquoted here.

# Base Docker image; overridable via RAY_IMAGE_ML_NIGHTLY_GPU.
base_image: {{ env["RAY_IMAGE_ML_NIGHTLY_GPU"] | default("anyscale/ray-ml:nightly-py37-gpu") }}

debian_packages:
  - curl

python:
  pip_packages:
    - ray-lightning
    - tblib
    - torch==1.9.0
  conda_packages: []

post_build_cmds:
  # Upgrade the Ray Lightning version, otherwise it will be cached in the Anyscale Docker image.
  # The TIMESTAMP echo busts the Docker layer cache so the reinstalls below actually run.
  - echo {{ env["TIMESTAMP"] }}
  - pip3 install -U --force-reinstall ray-lightning pytorch-lightning lightning-bolts
  # Pin torch/torchvision back to the tested versions after the forced upgrades above.
  - pip3 install --force-reinstall torch==1.9.0
  - pip3 install --force-reinstall torchvision==0.10.0
  # Replace the image's preinstalled Ray with the wheel under test (RAY_WHEELS),
  # falling back to the latest released "ray" package.
  - pip uninstall -y ray || true && pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }}
  - {{ env["RAY_WHEELS_SANITY_CHECK"] | default("echo No Ray wheels sanity check") }}