Fix linting on master branch (#6174)

This commit is contained in:
Philipp Moritz 2019-11-16 10:02:58 -08:00 committed by GitHub
parent a68cda0a33
commit fc655acfee
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
11 changed files with 15 additions and 20 deletions

View file

@@ -279,7 +279,7 @@ class LoadMetrics(object):
now - t for t in self.last_heartbeat_time_by_ip.values()
]
most_delayed_heartbeats = sorted(
list(self.last_heartbeat_time_by_ip.items()),
self.last_heartbeat_time_by_ip.items(),
key=lambda pair: pair[1])[:5]
most_delayed_heartbeats = {
ip: (now - t)

View file

@@ -88,17 +88,12 @@ def teardown_cluster(config_file, yes, workers_only, override_cluster_name):
if workers_only:
A = []
else:
A = [
node_id for node_id in provider.non_terminated_nodes({
TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD
})
]
A += [
node_id for node_id in provider.non_terminated_nodes({
TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
A = provider.non_terminated_nodes({
TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD
})
]
A += provider.non_terminated_nodes({
TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
})
return A
# Loop here to check that both the head and worker nodes are actually

View file

@@ -139,7 +139,7 @@ def receive(sources, timeout=None):
# redis expects ms.
query += str(timeout_ms)
query += " STREAMS "
query += " ".join([task_id for task_id in task_id_to_sources])
query += " ".join(task_id_to_sources)
query += " "
query += " ".join([
ray.utils.decode(signal_counters[ray.utils.hex_to_binary(task_id)])

View file

@@ -2523,7 +2523,7 @@ def test_checkpointing_on_node_failure(ray_start_cluster_2_nodes,
"""Test actor checkpointing on a remote node."""
# Place the actor on the remote node.
cluster = ray_start_cluster_2_nodes
remote_node = [node for node in cluster.worker_nodes]
remote_node = list(cluster.worker_nodes)
actor_cls = ray.remote(max_reconstructions=1)(ray_checkpointable_actor_cls)
actor = actor_cls.remote()
while (ray.get(actor.node_id.remote()) != remote_node[0].unique_id):

View file

@@ -59,7 +59,7 @@ class AutoMLSearcherTest(unittest.TestCase):
self.assertEqual(len(searcher.next_trials()), 0)
for i, trial in enumerate(trials):
rewards = [x for x in range(i, i + 10)]
rewards = list(range(i, i + 10))
random.shuffle(rewards)
for reward in rewards:
searcher.on_trial_result(trial.trial_id, {"reward": reward})

View file

@@ -590,7 +590,7 @@ class HyperbandSuite(unittest.TestCase):
def testRemove(self):
"""Test with 4: start 1, remove 1 pending, add 2, remove 1 pending."""
sched, runner = self.schedulerSetup(4)
trials = sorted(list(sched._trial_info), key=lambda t: t.trial_id)
trials = sorted(sched._trial_info, key=lambda t: t.trial_id)
runner._launch_trial(trials[0])
sched.on_trial_result(runner, trials[0], result(1, 5))
self.assertEqual(trials[0].status, Trial.RUNNING)

View file

@@ -202,7 +202,7 @@ def RunnerHandler(runner):
path = parts.path
if path == "/trials":
return [t for t in runner.get_trials()]
return list(runner.get_trials())
else:
trial_id = path.split("/")[-1]
return runner.get_trial(trial_id)

View file

@@ -95,7 +95,7 @@ def stats(policy, train_batch):
"policy_loss": policy.loss.pi_loss,
"policy_entropy": policy.loss.entropy,
"var_gnorm": tf.global_norm(
[x for x in policy.model.trainable_variables()]),
list(policy.model.trainable_variables())),
"vf_loss": policy.loss.vf_loss,
}

View file

@@ -95,7 +95,7 @@ class AlwaysSameHeuristic(Policy):
info_batch=None,
episodes=None,
**kwargs):
return [x for x in state_batches[0]], state_batches, {}
return list(state_batches[0]), state_batches, {}
def learn_on_batch(self, samples):
pass

View file

@@ -239,7 +239,7 @@ class DictFlatteningPreprocessor(Preprocessor):
@override(Preprocessor)
def write(self, observation, array, offset):
if not isinstance(observation, OrderedDict):
observation = OrderedDict(sorted(list(observation.items())))
observation = OrderedDict(sorted(observation.items()))
assert len(observation) == len(self.preprocessors), \
(len(observation), len(self.preprocessors))
for o, p in zip(observation.values(), self.preprocessors):

View file

@@ -71,7 +71,7 @@ class InputReader(object):
"tf_input_ops() is not implemented for multi agent batches")
keys = [
k for k in sorted(list(batch.keys()))
k for k in sorted(batch.keys())
if np.issubdtype(batch[k].dtype, np.number)
]
dtypes = [batch[k].dtype for k in keys]