diff --git a/ci/travis/build-docker-images.py b/ci/travis/build-docker-images.py
index 0da2fa05f..62e1b72ea 100644
--- a/ci/travis/build-docker-images.py
+++ b/ci/travis/build-docker-images.py
@@ -154,6 +154,27 @@ def _build_cpu_gpu_images(image_name, no_cache=True) -> List[str]:
     return built_images
 
 
+def _test_ray_ml_libraries(image_tag: str) -> None:
+    if "gpu" not in image_tag:
+        return
+    tf_container = DOCKER_CLIENT.containers.run(
+        f"rayproject/ray-ml:{image_tag}",
+        "python -c 'import tensorflow as tf'",
+        detach=True)
+    tf_logs = tf_container.logs().decode()
+    assert "Successfully opened dynamic library libcudart" in tf_logs
+    tf_container.stop()
+
+    torch_container = DOCKER_CLIENT.containers.run(
+        f"rayproject/ray-ml:{image_tag}",
+        "python -c 'import torch; torch.cuda.cudart()'",
+        detach=True)
+    torch_logs = torch_container.logs().decode()
+    assert "Torch not compiled with CUDA enabled" not in torch_logs
+    assert "Found no NVIDIA driver" in torch_logs
+    torch_container.stop()
+
+
 def copy_wheels():
     root_dir = _get_root_dir()
     wheels = _get_wheel_name(None)
@@ -214,6 +235,7 @@ def build_ray_ml():
         tag = img.split(":")[-1]
         DOCKER_CLIENT.api.tag(
             image=img, repository="rayproject/autoscaler", tag=tag)
+        _test_ray_ml_libraries(tag)
 
 
 def _get_docker_creds() -> Tuple[str, str]:
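
A minimal standalone sketch of the same idea, for running the check locally rather than through the build script. It assumes docker-py is installed and that an image tag such as "nightly-gpu" exists on the machine; check_gpu_libraries and that tag are illustrative names, not part of the patch. Unlike the helper above, it waits for the container to exit so the logs are complete before asserting on them.

    # Hypothetical standalone version of the GPU-library check (not from the patch).
    import docker

    client = docker.from_env()

    def check_gpu_libraries(image_tag: str) -> None:
        # Run a throwaway container that imports TensorFlow, then inspect
        # its logs for the CUDA runtime library being loaded.
        container = client.containers.run(
            f"rayproject/ray-ml:{image_tag}",
            "python -c 'import tensorflow as tf'",
            detach=True)
        container.wait()  # block until the import has finished
        logs = container.logs().decode()
        container.remove()
        assert "Successfully opened dynamic library libcudart" in logs

    if __name__ == "__main__":
        check_gpu_libraries("nightly-gpu")  # illustrative tag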