from __future__ import print_function

import os
import signal
import socket
import struct
import subprocess
import sys
import unittest
import random
import time
import tempfile

import plasma

USE_VALGRIND = False
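

# Return a random object ID: a string of plasma.PLASMA_ID_SIZE random bytes.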
def random_object_id():
  return "".join([chr(random.randint(0, 255)) for _ in range(plasma.PLASMA_ID_SIZE)])
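

# Generate a metadata buffer of the given length, zero-filled except for the
# first byte, the last byte, and up to 100 randomly chosen positions.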
def generate_metadata(length):
  metadata = length * ["\x00"]
  if length > 0:
    metadata[0] = chr(random.randint(0, 255))
    metadata[-1] = chr(random.randint(0, 255))
    for _ in range(100):
      metadata[random.randint(0, length - 1)] = chr(random.randint(0, 255))
  return buffer("".join(metadata))
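

# Write random bytes into a writable data buffer: the first byte, the last
# byte, and up to 100 randomly chosen positions.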
def write_to_data_buffer(buff, length):
  if length > 0:
    buff[0] = chr(random.randint(0, 255))
    buff[-1] = chr(random.randint(0, 255))
    for _ in range(100):
      buff[random.randint(0, length - 1)] = chr(random.randint(0, 255))
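

# Create an object with random data and metadata of the given sizes in the
# store that the given client is connected to, and optionally seal it.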
def create_object(client, data_size, metadata_size, seal=True):
  object_id = random_object_id()
  metadata = generate_metadata(metadata_size)
  memory_buffer = client.create(object_id, data_size, metadata)
  write_to_data_buffer(memory_buffer, data_size)
  if seal:
    client.seal(object_id)
  return object_id, memory_buffer, metadata
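

# Check that client1 and client2 return identical data and metadata for
# object_id, optionally also comparing against an expected buffer and metadata.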
def assert_get_object_equal(unit_test, client1, client2, object_id, memory_buffer=None, metadata=None):
  if memory_buffer is not None:
    unit_test.assertEqual(memory_buffer[:], client2.get(object_id)[:])
  if metadata is not None:
    unit_test.assertEqual(metadata[:], client2.get_metadata(object_id)[:])
  unit_test.assertEqual(client1.get(object_id)[:], client2.get(object_id)[:])
  unit_test.assertEqual(client1.get_metadata(object_id)[:],
                        client2.get_metadata(object_id)[:])
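

# Tests that exercise a single plasma store through one client.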
class TestPlasmaClient(unittest.TestCase):

  def setUp(self):
    # Start Plasma.
    plasma_store_executable = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../build/plasma_store")
    store_name = "/tmp/store{}".format(random.randint(0, 10000))
    command = [plasma_store_executable, "-s", store_name]
    if USE_VALGRIND:
      self.p = subprocess.Popen(["valgrind", "--track-origins=yes", "--leak-check=full", "--show-leak-kinds=all", "--error-exitcode=1"] + command)
      time.sleep(2.0)
    else:
      self.p = subprocess.Popen(command)
    # Connect to Plasma.
    self.plasma_client = plasma.PlasmaClient(store_name)

  def tearDown(self):
    # Kill the plasma store process.
    if USE_VALGRIND:
      self.p.send_signal(signal.SIGTERM)
      self.p.wait()
      if self.p.returncode != 0:
        os._exit(-1)
    else:
      self.p.kill()

  def test_create(self):
    # Create an object id string.
    object_id = random_object_id()
    # Create a new buffer and write to it.
    length = 50
    memory_buffer = self.plasma_client.create(object_id, length)
    for i in range(length):
      memory_buffer[i] = chr(i % 256)
    # Seal the object.
    self.plasma_client.seal(object_id)
    # Get the object.
    memory_buffer = self.plasma_client.get(object_id)
    for i in range(length):
      self.assertEqual(memory_buffer[i], chr(i % 256))

  def test_create_with_metadata(self):
    for length in range(1000):
      # Create an object id string.
      object_id = random_object_id()
      # Create a random metadata string.
      metadata = generate_metadata(length)
      # Create a new buffer and write to it.
      memory_buffer = self.plasma_client.create(object_id, length, metadata)
      for i in range(length):
        memory_buffer[i] = chr(i % 256)
      # Seal the object.
      self.plasma_client.seal(object_id)
      # Get the object.
      memory_buffer = self.plasma_client.get(object_id)
      for i in range(length):
        self.assertEqual(memory_buffer[i], chr(i % 256))
      # Get the metadata.
      metadata_buffer = self.plasma_client.get_metadata(object_id)
      self.assertEqual(len(metadata), len(metadata_buffer))
      for i in range(len(metadata)):
        self.assertEqual(metadata[i], metadata_buffer[i])

  def test_contains(self):
    fake_object_ids = [random_object_id() for _ in range(100)]
    real_object_ids = [random_object_id() for _ in range(100)]
    for object_id in real_object_ids:
      self.assertFalse(self.plasma_client.contains(object_id))
      memory_buffer = self.plasma_client.create(object_id, 100)
      self.plasma_client.seal(object_id)
      self.assertTrue(self.plasma_client.contains(object_id))
    for object_id in fake_object_ids:
      self.assertFalse(self.plasma_client.contains(object_id))
    for object_id in real_object_ids:
      self.assertTrue(self.plasma_client.contains(object_id))

  # def test_individual_delete(self):
  #   length = 100
  #   # Create an object id string.
  #   object_id = random_object_id()
  #   # Create a random metadata string.
  #   metadata = generate_metadata(100)
  #   # Create a new buffer and write to it.
  #   memory_buffer = self.plasma_client.create(object_id, length, metadata)
  #   for i in range(length):
  #     memory_buffer[i] = chr(i % 256)
  #   # Seal the object.
  #   self.plasma_client.seal(object_id)
  #   # Check that the object is present.
  #   self.assertTrue(self.plasma_client.contains(object_id))
  #   # Delete the object.
  #   self.plasma_client.delete(object_id)
  #   # Make sure the object is no longer present.
  #   self.assertFalse(self.plasma_client.contains(object_id))
  #
  # def test_delete(self):
  #   # Create some objects.
  #   object_ids = [random_object_id() for _ in range(100)]
  #   for object_id in object_ids:
  #     length = 100
  #     # Create a random metadata string.
  #     metadata = generate_metadata(100)
  #     # Create a new buffer and write to it.
  #     memory_buffer = self.plasma_client.create(object_id, length, metadata)
  #     for i in range(length):
  #       memory_buffer[i] = chr(i % 256)
  #     # Seal the object.
  #     self.plasma_client.seal(object_id)
  #     # Check that the object is present.
  #     self.assertTrue(self.plasma_client.contains(object_id))
  #
  #   # Delete the objects and make sure they are no longer present.
  #   for object_id in object_ids:
  #     # Delete the object.
  #     self.plasma_client.delete(object_id)
  #     # Make sure the object is no longer present.
  #     self.assertFalse(self.plasma_client.contains(object_id))

  def test_illegal_functionality(self):
    # Create an object id string.
    object_id = random_object_id()
    # Create a new buffer and write to it.
    length = 1000
    memory_buffer = self.plasma_client.create(object_id, length)
    # Make sure we cannot access memory out of bounds.
    self.assertRaises(Exception, lambda: memory_buffer[length])
    # Seal the object.
    self.plasma_client.seal(object_id)
    # This test is commented out because it currently fails.
    # # Make sure the object is read only now.
    # def illegal_assignment():
    #   memory_buffer[0] = chr(0)
    # self.assertRaises(Exception, illegal_assignment)
    # Get the object.
    memory_buffer = self.plasma_client.get(object_id)
    # Make sure the object is read only.
    def illegal_assignment():
      memory_buffer[0] = chr(0)
    self.assertRaises(Exception, illegal_assignment)

  def test_subscribe(self):
    # Subscribe to notifications from the Plasma Store.
    sock = self.plasma_client.subscribe()
    for i in [1, 10, 100, 1000, 10000, 100000]:
      object_ids = [random_object_id() for _ in range(i)]
      for object_id in object_ids:
        # Create an object and seal it to trigger a notification.
        self.plasma_client.create(object_id, 1000)
        self.plasma_client.seal(object_id)
      # Check that we received notifications for all of the objects.
      for object_id in object_ids:
        message_data = self.plasma_client.get_next_notification()
        self.assertEqual(object_id, message_data)
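

# Tests that exercise two plasma stores and their managers, including
# transferring objects from one store to the other.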
class TestPlasmaManager(unittest.TestCase):

  def setUp(self):
    # Start two PlasmaStores.
    plasma_store_executable = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../build/plasma_store")
    store_name1 = "/tmp/store{}".format(random.randint(0, 10000))
    store_name2 = "/tmp/store{}".format(random.randint(0, 10000))
    plasma_store_command1 = [plasma_store_executable, "-s", store_name1]
    plasma_store_command2 = [plasma_store_executable, "-s", store_name2]

    if USE_VALGRIND:
      self.p2 = subprocess.Popen(["valgrind", "--track-origins=yes", "--leak-check=full", "--show-leak-kinds=all", "--error-exitcode=1"] + plasma_store_command1)
      self.p3 = subprocess.Popen(["valgrind", "--track-origins=yes", "--leak-check=full", "--show-leak-kinds=all", "--error-exitcode=1"] + plasma_store_command2)
    else:
      self.p2 = subprocess.Popen(plasma_store_command1)
      self.p3 = subprocess.Popen(plasma_store_command2)

    # Start a Redis server.
    redis_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../../common/thirdparty/redis-3.2.3/src/redis-server")
    self.redis_process = None
    manager_redis_args = []
    if os.path.exists(redis_path):
      redis_port = 6379
      with open(os.devnull, 'w') as FNULL:
        self.redis_process = subprocess.Popen([redis_path, "--port", str(redis_port)], stdout=FNULL)
      time.sleep(0.1)
      manager_redis_args = ["-d", "{addr}:{port}".format(addr="127.0.0.1", port=redis_port)]

    # Start two PlasmaManagers.
    self.port1 = random.randint(10000, 50000)
    self.port2 = random.randint(10000, 50000)
    plasma_manager_executable = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../build/plasma_manager")
    plasma_manager_command1 = [plasma_manager_executable,
                               "-s", store_name1,
                               "-m", "127.0.0.1",
                               "-p", str(self.port1)] + manager_redis_args
    plasma_manager_command2 = [plasma_manager_executable,
                               "-s", store_name2,
                               "-m", "127.0.0.1",
                               "-p", str(self.port2)] + manager_redis_args

    if USE_VALGRIND:
      self.p4 = subprocess.Popen(["valgrind", "--track-origins=yes", "--leak-check=full", "--show-leak-kinds=all", "--error-exitcode=1"] + plasma_manager_command1)
      self.p5 = subprocess.Popen(["valgrind", "--track-origins=yes", "--leak-check=full", "--show-leak-kinds=all", "--error-exitcode=1"] + plasma_manager_command2)
    else:
      self.p4 = subprocess.Popen(plasma_manager_command1)
      self.p5 = subprocess.Popen(plasma_manager_command2)

    # Connect two PlasmaClients.
    self.client1 = plasma.PlasmaClient(store_name1, "127.0.0.1", self.port1)
    self.client2 = plasma.PlasmaClient(store_name2, "127.0.0.1", self.port2)

  def tearDown(self):
    # Kill the PlasmaStore and PlasmaManager processes.
    if USE_VALGRIND:
      self.p4.send_signal(signal.SIGTERM)
      self.p4.wait()
      self.p5.send_signal(signal.SIGTERM)
      self.p5.wait()
      self.p2.send_signal(signal.SIGTERM)
      self.p2.wait()
      self.p3.send_signal(signal.SIGTERM)
      self.p3.wait()
      if self.p2.returncode != 0 or self.p3.returncode != 0 or self.p4.returncode != 0 or self.p5.returncode != 0:
        print("aborting due to valgrind error")
        os._exit(-1)
    else:
      self.p2.kill()
      self.p3.kill()
      self.p4.kill()
      self.p5.kill()
    if self.redis_process:
      self.redis_process.kill()

  # def test_fetch(self):
  #   if self.redis_process is None:
  #     print("Cannot test fetch without a running redis instance.")
  #     self.assertTrue(False)
  #   for _ in range(100):
  #     # Create an object.
  #     object_id1, memory_buffer1, metadata1 = create_object(self.client1, 2000, 2000)
  #     # Fetch the object from the other plasma store.
  #     # TODO(swang): This line is a hack! It makes sure that the entry will be
  #     # in the object table once we call the fetch operation. Remove once
  #     # retries are implemented by Ray common.
  #     time.sleep(0.1)
  #     successes = self.client2.fetch([object_id1])
  #     self.assertEqual(successes, [True])
  #     # Compare the two buffers.
  #     assert_get_object_equal(self, self.client1, self.client2, object_id1,
  #                             memory_buffer=memory_buffer1, metadata=metadata1)
  #     # Fetch in the other direction. These should return quickly because
  #     # client1 already has the object.
  #     successes = self.client1.fetch([object_id1])
  #     self.assertEqual(successes, [True])
  #     assert_get_object_equal(self, self.client2, self.client1, object_id1,
  #                             memory_buffer=memory_buffer1, metadata=metadata1)

  # def test_fetch_multiple(self):
  #   if self.redis_process is None:
  #     print("Cannot test fetch without a running redis instance.")
  #     self.assertTrue(False)
  #   for _ in range(20):
  #     # Create two objects and a third fake one that doesn't exist.
  #     object_id1, memory_buffer1, metadata1 = create_object(self.client1, 2000, 2000)
  #     missing_object_id = random_object_id()
  #     object_id2, memory_buffer2, metadata2 = create_object(self.client1, 2000, 2000)
  #     object_ids = [object_id1, missing_object_id, object_id2]
  #     # Fetch the objects from the other plasma store. The second object ID
  #     # should time out since it does not exist.
  #     # TODO(swang): This line is a hack! It makes sure that the entry will be
  #     # in the object table once we call the fetch operation. Remove once
  #     # retries are implemented by Ray common.
  #     time.sleep(0.1)
  #     successes = self.client2.fetch(object_ids)
  #     self.assertEqual(successes, [True, False, True])
  #     # Compare the buffers of the objects that do exist.
  #     assert_get_object_equal(self, self.client1, self.client2, object_id1,
  #                             memory_buffer=memory_buffer1, metadata=metadata1)
  #     assert_get_object_equal(self, self.client1, self.client2, object_id2,
  #                             memory_buffer=memory_buffer2, metadata=metadata2)
  #     # Fetch in the other direction. The fake object still does not exist.
  #     successes = self.client1.fetch(object_ids)
  #     self.assertEqual(successes, [True, False, True])
  #     assert_get_object_equal(self, self.client2, self.client1, object_id1,
  #                             memory_buffer=memory_buffer1, metadata=metadata1)
  #     assert_get_object_equal(self, self.client2, self.client1, object_id2,
  #                             memory_buffer=memory_buffer2, metadata=metadata2)

  def test_transfer(self):
    for _ in range(100):
      # Create an object.
      object_id1, memory_buffer1, metadata1 = create_object(self.client1, 2000, 2000)
      # Transfer the buffer to the other PlasmaStore.
      self.client1.transfer("127.0.0.1", self.port2, object_id1)
      # Compare the two buffers.
      assert_get_object_equal(self, self.client1, self.client2, object_id1,
                              memory_buffer=memory_buffer1, metadata=metadata1)
      # # Transfer the buffer again.
      # self.client1.transfer("127.0.0.1", self.port2, object_id1)
      # # Compare the two buffers.
      # assert_get_object_equal(self, self.client1, self.client2, object_id1,
      #                         memory_buffer=memory_buffer1, metadata=metadata1)

      # Create an object.
      object_id2, memory_buffer2, metadata2 = create_object(self.client2, 20000, 20000)
      # Transfer the buffer to the other PlasmaStore.
      self.client2.transfer("127.0.0.1", self.port1, object_id2)
      # Compare the two buffers.
      assert_get_object_equal(self, self.client1, self.client2, object_id2,
                              memory_buffer=memory_buffer2, metadata=metadata2)

  def test_illegal_functionality(self):
    # Create an object id string.
    object_id = random_object_id()
    # Create a new buffer.
    # memory_buffer = self.client1.create(object_id, 20000)
    # This test is commented out because it currently fails.
    # # Transferring the buffer before sealing it should fail.
    # self.assertRaises(Exception, lambda: self.manager1.transfer(1, object_id))

  def test_stresstest(self):
    a = time.time()
    object_ids = []
    for i in range(10000):  # TODO(pcm): increase this to 100000.
      object_id = random_object_id()
      object_ids.append(object_id)
      self.client1.create(object_id, 1)
      self.client1.seal(object_id)
    for object_id in object_ids:
      self.client1.transfer("127.0.0.1", self.port2, object_id)
    b = time.time() - a

    print("it took", b, "seconds to put and transfer the objects")
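

# Passing "valgrind" as the last command-line argument runs the plasma store
# and manager processes under valgrind (see the setUp methods above).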
if __name__ == "__main__":
  if len(sys.argv) > 1:
    # Pop the argument so we don't mess with unittest's own argument parser.
    if sys.argv[-1] == "valgrind":
      arg = sys.argv.pop()
      USE_VALGRIND = True
      print("Using valgrind for tests")
  unittest.main(verbosity=2)