diff --git a/deploy/charts/ray/values.yaml b/deploy/charts/ray/values.yaml
index 6c1416b5f..eba9705b2 100644
--- a/deploy/charts/ray/values.yaml
+++ b/deploy/charts/ray/values.yaml
@@ -19,7 +19,7 @@ podTypes:
         CPU: 1
         # memory is the memory used by this Pod type.
         # (Used for both requests and limits.)
-        memory: 512Mi
+        memory: 1Gi
         # GPU is the number of NVIDIA GPUs used by this pod type.
         # (Optional, requires GPU nodes with appropriate setup. See https://docs.ray.io/en/master/cluster/kubernetes-gpu.html)
         GPU: 0
@@ -49,7 +49,7 @@ podTypes:
         maxWorkers: 3
         # memory is the memory used by this Pod type.
         # (Used for both requests and limits.)
-        memory: 512Mi
+        memory: 1Gi
         # CPU is the number of CPUs used by this pod type.
         # (Used for both requests and limits. Must be an integer, as Ray does not support fractional CPUs.)
         CPU: 1
diff --git a/deploy/components/example_cluster.yaml b/deploy/components/example_cluster.yaml
index e27eb1be5..9de770d5d 100644
--- a/deploy/components/example_cluster.yaml
+++ b/deploy/components/example_cluster.yaml
@@ -68,9 +68,10 @@ spec:
           resources:
             requests:
               cpu: 1000m
-              memory: 512Mi
+              memory: 1Gi
               ephemeral-storage: 1Gi
             limits:
+              cpu: 1000m
               # The maximum memory that this pod is allowed to use. The
               # limit will be detected by ray and split to use 10% for
               # redis, 30% for the shared memory object store, and the
@@ -78,7 +79,7 @@ spec:
               # the object store size is not set manually, ray will
               # allocate a very large object store in each pod that may
               # cause problems for other pods.
-              memory: 512Mi
+              memory: 1Gi
   - name: worker-node
     # Minimum number of Ray workers of this Pod type.
     minWorkers: 2
@@ -114,9 +115,10 @@ spec:
           resources:
             requests:
               cpu: 1000m
-              memory: 512Mi
+              memory: 1Gi
               ephemeral-storage: 1Gi
             limits:
+              cpu: 1000m
               # The maximum memory that this pod is allowed to use. The
               # limit will be detected by ray and split to use 10% for
               # redis, 30% for the shared memory object store, and the
@@ -124,7 +126,7 @@ spec:
               # the object store size is not set manually, ray will
               # allocate a very large object store in each pod that may
               # cause problems for other pods.
-              memory: 512Mi
+              memory: 1Gi
   # Commands to start Ray on the head node. You don't need to change this.
   # Note dashboard-host is set to 0.0.0.0 so that Kubernetes can port forward.
   headStartRayCommands:
diff --git a/python/ray/tests/kubernetes_e2e/test_k8s_operator_basic.py b/python/ray/tests/kubernetes_e2e/test_k8s_operator_basic.py
index 13d0aa71c..74aa5d893 100644
--- a/python/ray/tests/kubernetes_e2e/test_k8s_operator_basic.py
+++ b/python/ray/tests/kubernetes_e2e/test_k8s_operator_basic.py
@@ -299,7 +299,6 @@ class KubernetesOperatorTest(unittest.TestCase):
 
         @ray.remote
         class Test:
-            @ray.method()
             def method(self):
                 return "success"
 
diff --git a/release/kubernetes_manual_tests/README.md b/release/kubernetes_manual_tests/README.md
index 12b61f272..4f4967bc5 100644
--- a/release/kubernetes_manual_tests/README.md
+++ b/release/kubernetes_manual_tests/README.md
@@ -7,9 +7,10 @@ If you have issues running them, bug the code owner(s) for OSS Kubernetes suppor
 1. Configure kubectl and Helm 3 to access a K8s cluster.
 2. `git checkout releases/`
 3. You might have to locally pip install the Ray wheel for the relevant commit (or pip install -e) in a conda env, see Ray client note below.
-4. cd to this directory
-5. `IMAGE=rayproject/ray: bash k8s_release_tests.sh`
-6. Test outcomes will be reported at the end of the output.
+4. You might have to temporarily delete the file `ray/python/ray/tests/conftest.py`.
+5. cd to this directory
+6. `IMAGE=rayproject/ray: bash k8s_release_tests.sh`
+7. Test outcomes will be reported at the end of the output.
 
 This runs three tests and does the necessary resource creation/teardown. The tests typically take about 15 minutes to finish.
 
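Two notes on the changes above. First, with the memory bump from 512Mi to 1Gi, the 10%/30% split described in the example_cluster.yaml comments gives Ray roughly 100Mi for Redis and 300Mi for the shared-memory object store per pod, with the remainder available for general use. Second, dropping `@ray.method()` in the e2e test changes nothing functionally: with no arguments the decorator is a no-op, and it is only needed to override per-method options such as `num_returns`. A minimal local sketch of the pattern after the edit (the `ray.init()` call and the assertion are illustrative and not part of the test, which runs against the operator-managed cluster):

```python
import ray

# Start a local Ray instance for illustration; the e2e test instead talks to
# the operator-managed cluster running on Kubernetes.
ray.init()


@ray.remote
class Test:
    # No @ray.method() needed: plain methods on a @ray.remote class are
    # already invocable with .remote(). The decorator is only required when
    # overriding per-method options such as num_returns.
    def method(self):
        return "success"


actor = Test.remote()
assert ray.get(actor.method.remote()) == "success"
ray.shutdown()
```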