Add back some tests for xray. (#2772)

This commit is contained in:
Robert Nishihara 2018-08-30 11:07:23 -07:00 committed by Philipp Moritz
parent 9f06c19edd
commit 32f7d6fcf5
4 changed files with 14 additions and 18 deletions

View file

@@ -1986,10 +1986,8 @@ def ray_stop():
ray.shutdown()
@unittest.skipIf(
os.environ.get("RAY_USE_XRAY") == "1" or sys.version_info < (3, 0),
"This test does not work with xray yet"
" and is currently failing on Python 2.7.")
@unittest.skipIf(sys.version_info < (3, 0),
"This test is currently failing on Python 2.7.")
def testLifetimeAndTransientResources(ray_stop):
ray.init(num_cpus=1)

View file

@@ -2,7 +2,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import sys
@@ -75,9 +74,6 @@ class DistributedArrayTest(unittest.TestCase):
np.zeros([da.BLOCK_SIZE, da.BLOCK_SIZE])
]))
@unittest.skipIf(
os.environ.get("RAY_USE_XRAY") == "1",
"This test does not work with xray yet.")
def testMethods(self):
for module in [
ra.core, ra.random, ra.linalg, da.core, da.random, da.linalg

View file

@ -152,9 +152,6 @@ print("success")
# Make sure the other driver succeeded.
assert "success" in out
@unittest.skipIf(
os.environ.get("RAY_USE_XRAY") == "1",
"This test does not work with xray yet.")
def testDriverExitingQuickly(self):
# This test will create some drivers that submit some tasks and then
# exit without waiting for the tasks to complete.

View file

@@ -1675,25 +1675,30 @@ class ResourcesTest(unittest.TestCase):
ray.get(a1.test.remote())
@unittest.skipIf(
os.environ.get("RAY_USE_XRAY") == "1",
"This test does not work with xray yet.")
os.environ.get("RAY_USE_XRAY") != "1",
"This test only works with xray.")
def testZeroCPUs(self):
ray.init(num_cpus=0)
@ray.remote(num_cpus=0)
def f():
return 1
# The task should be able to execute.
ray.get(f.remote())
def testZeroCPUsActor(self):
ray.worker._init(
start_ray_local=True, num_local_schedulers=2, num_cpus=[0, 2])
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(num_cpus=0)
def f():
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Make sure tasks and actors run on the remote local scheduler.
assert ray.get(f.remote()) != local_plasma
a = Foo.remote()
assert ray.get(a.method.remote()) != local_plasma