diff --git a/python/ray/autoscaler/autoscaler.py b/python/ray/autoscaler/autoscaler.py
index 1bb96018a..e6b9e5c68 100644
--- a/python/ray/autoscaler/autoscaler.py
+++ b/python/ray/autoscaler/autoscaler.py
@@ -279,7 +279,7 @@ class LoadMetrics(object):
             now - t for t in self.last_heartbeat_time_by_ip.values()
         ]
         most_delayed_heartbeats = sorted(
-            list(self.last_heartbeat_time_by_ip.items()),
+            self.last_heartbeat_time_by_ip.items(),
             key=lambda pair: pair[1])[:5]
         most_delayed_heartbeats = {
             ip: (now - t)
diff --git a/python/ray/autoscaler/commands.py b/python/ray/autoscaler/commands.py
index 9367baa29..f0b836748 100644
--- a/python/ray/autoscaler/commands.py
+++ b/python/ray/autoscaler/commands.py
@@ -88,17 +88,12 @@ def teardown_cluster(config_file, yes, workers_only, override_cluster_name):
             if workers_only:
                 A = []
             else:
-                A = [
-                    node_id for node_id in provider.non_terminated_nodes({
-                        TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD
-                    })
-                ]
-
-            A += [
-                node_id for node_id in provider.non_terminated_nodes({
-                    TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
+                A = provider.non_terminated_nodes({
+                    TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD
                 })
-            ]
+            A += provider.non_terminated_nodes({
+                TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
+            })
             return A
 
         # Loop here to check that both the head and worker nodes are actually
diff --git a/python/ray/experimental/signal.py b/python/ray/experimental/signal.py
index f9b3d1c41..147af8a0a 100644
--- a/python/ray/experimental/signal.py
+++ b/python/ray/experimental/signal.py
@@ -139,7 +139,7 @@ def receive(sources, timeout=None):
     # redis expects ms.
     query += str(timeout_ms)
     query += " STREAMS "
-    query += " ".join([task_id for task_id in task_id_to_sources])
+    query += " ".join(task_id_to_sources)
     query += " "
     query += " ".join([
         ray.utils.decode(signal_counters[ray.utils.hex_to_binary(task_id)])
diff --git a/python/ray/tests/test_actor.py b/python/ray/tests/test_actor.py
index daf9833a6..c16019a89 100644
--- a/python/ray/tests/test_actor.py
+++ b/python/ray/tests/test_actor.py
@@ -2523,7 +2523,7 @@ def test_checkpointing_on_node_failure(ray_start_cluster_2_nodes,
     """Test actor checkpointing on a remote node."""
     # Place the actor on the remote node.
     cluster = ray_start_cluster_2_nodes
-    remote_node = [node for node in cluster.worker_nodes]
+    remote_node = list(cluster.worker_nodes)
     actor_cls = ray.remote(max_reconstructions=1)(ray_checkpointable_actor_cls)
     actor = actor_cls.remote()
     while (ray.get(actor.node_id.remote()) != remote_node[0].unique_id):
diff --git a/python/ray/tune/tests/test_automl_searcher.py b/python/ray/tune/tests/test_automl_searcher.py
index 1dd153558..1f2818e13 100644
--- a/python/ray/tune/tests/test_automl_searcher.py
+++ b/python/ray/tune/tests/test_automl_searcher.py
@@ -59,7 +59,7 @@ class AutoMLSearcherTest(unittest.TestCase):
         self.assertEqual(len(searcher.next_trials()), 0)
 
         for i, trial in enumerate(trials):
-            rewards = [x for x in range(i, i + 10)]
+            rewards = list(range(i, i + 10))
             random.shuffle(rewards)
             for reward in rewards:
                 searcher.on_trial_result(trial.trial_id, {"reward": reward})
diff --git a/python/ray/tune/tests/test_trial_scheduler.py b/python/ray/tune/tests/test_trial_scheduler.py
index cdbc02ea9..559f9de4d 100644
--- a/python/ray/tune/tests/test_trial_scheduler.py
+++ b/python/ray/tune/tests/test_trial_scheduler.py
@@ -590,7 +590,7 @@ class HyperbandSuite(unittest.TestCase):
     def testRemove(self):
         """Test with 4: start 1, remove 1 pending, add 2, remove 1 pending."""
         sched, runner = self.schedulerSetup(4)
-        trials = sorted(list(sched._trial_info), key=lambda t: t.trial_id)
+        trials = sorted(sched._trial_info, key=lambda t: t.trial_id)
         runner._launch_trial(trials[0])
         sched.on_trial_result(runner, trials[0], result(1, 5))
         self.assertEqual(trials[0].status, Trial.RUNNING)
diff --git a/python/ray/tune/web_server.py b/python/ray/tune/web_server.py
index 022ccf12b..af1acae94 100644
--- a/python/ray/tune/web_server.py
+++ b/python/ray/tune/web_server.py
@@ -202,7 +202,7 @@ def RunnerHandler(runner):
             path = parts.path
 
             if path == "/trials":
-                return [t for t in runner.get_trials()]
+                return list(runner.get_trials())
             else:
                 trial_id = path.split("/")[-1]
                 return runner.get_trial(trial_id)
diff --git a/rllib/agents/a3c/a3c_tf_policy.py b/rllib/agents/a3c/a3c_tf_policy.py
index 1fb1ed2ba..2bb5d8bae 100644
--- a/rllib/agents/a3c/a3c_tf_policy.py
+++ b/rllib/agents/a3c/a3c_tf_policy.py
@@ -95,7 +95,7 @@ def stats(policy, train_batch):
         "policy_loss": policy.loss.pi_loss,
         "policy_entropy": policy.loss.entropy,
         "var_gnorm": tf.global_norm(
-            [x for x in policy.model.trainable_variables()]),
+            list(policy.model.trainable_variables())),
         "vf_loss": policy.loss.vf_loss,
     }
 
diff --git a/rllib/examples/rock_paper_scissors_multiagent.py b/rllib/examples/rock_paper_scissors_multiagent.py
index e340e3612..eefa8085a 100644
--- a/rllib/examples/rock_paper_scissors_multiagent.py
+++ b/rllib/examples/rock_paper_scissors_multiagent.py
@@ -95,7 +95,7 @@ class AlwaysSameHeuristic(Policy):
                         info_batch=None,
                         episodes=None,
                         **kwargs):
-        return [x for x in state_batches[0]], state_batches, {}
+        return list(state_batches[0]), state_batches, {}
 
     def learn_on_batch(self, samples):
         pass
diff --git a/rllib/models/preprocessors.py b/rllib/models/preprocessors.py
index 947c84d59..e0a33d9b7 100644
--- a/rllib/models/preprocessors.py
+++ b/rllib/models/preprocessors.py
@@ -239,7 +239,7 @@ class DictFlatteningPreprocessor(Preprocessor):
     @override(Preprocessor)
     def write(self, observation, array, offset):
         if not isinstance(observation, OrderedDict):
-            observation = OrderedDict(sorted(list(observation.items())))
+            observation = OrderedDict(sorted(observation.items()))
         assert len(observation) == len(self.preprocessors), \
             (len(observation), len(self.preprocessors))
         for o, p in zip(observation.values(), self.preprocessors):
diff --git a/rllib/offline/input_reader.py b/rllib/offline/input_reader.py
index 053c27934..163a15e1b 100644
--- a/rllib/offline/input_reader.py
+++ b/rllib/offline/input_reader.py
@@ -71,7 +71,7 @@ class InputReader(object):
                 "tf_input_ops() is not implemented for multi agent batches")
 
         keys = [
-            k for k in sorted(batch.keys())
+            k for k in sorted(batch.keys())
             if np.issubdtype(batch[k].dtype, np.number)
         ]
         dtypes = [batch[k].dtype for k in keys]
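
Every hunk above applies the same simplification: an identity comprehension such as [x for x in xs], or a redundant list() call around an iterable that the consumer already accepts, is replaced by passing the iterable directly (or by a single list() call where a real list is needed). A minimal standalone sketch of the two variants of the pattern; the names xs and d are hypothetical and not taken from the diff:

    # Variant 1: identity comprehension -> list() (or no conversion at all).
    xs = range(5)
    copy_before = [x for x in xs]   # element-by-element copy, no benefit
    copy_after = list(xs)           # same result, clearer intent

    # Variant 2: redundant list() inside sorted(). sorted() accepts any
    # iterable and always returns a fresh list, so the inner copy is waste.
    d = {"b": 2, "a": 1}
    pairs_before = sorted(list(d.items()), key=lambda pair: pair[1])
    pairs_after = sorted(d.items(), key=lambda pair: pair[1])

    assert copy_before == copy_after
    assert pairs_before == pairs_after

Both variants are behavior-preserving: consumers like sorted(), " ".join(), and tf.global_norm() iterate their argument without mutating it, and list(xs) produces the same list as [x for x in xs], so dropping the intermediate copy changes nothing observable.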