Move tf.test.is_gpu_available() to after session init (#6515)

* move to after session init

* script fixes
Authored by Eric Liang on 2019-12-17 14:55:39 -08:00; committed by GitHub
parent 4d71ab83cf
commit 2530eb90dc
2 changed files with 15 additions and 15 deletions
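
For context, the core of the change is an ordering constraint: the worker's TF graph and session are created first, and only then is tf.test.is_gpu_available() consulted. A minimal standalone sketch of that ordering (plain TF1 API, illustrative only, not the RolloutWorker code itself):

import tensorflow as tf

# Illustrative sketch only: create the session before asking TensorFlow
# whether a GPU is usable, mirroring the reordering in this commit.
with tf.Graph().as_default():
    sess = tf.Session()  # stands in for tf_session_creator() in RolloutWorker
    with sess.as_default():
        pass  # policies/models would be built here

# The availability check now runs only after session init.
if not tf.test.is_gpu_available():
    raise RuntimeError("TensorFlow reports GPU acceleration is disabled.")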

@@ -26,14 +26,14 @@ Create them at https://github.com/settings/tokens/new
 def run(access_token, prev_release_commit, curr_release_commit):
     print("Writing commit descriptions to 'commits.txt'...")
     check_output(
-        ("'git log {prev_release_commit}..{curr_release_commit} "
+        (f"git log {prev_release_commit}..{curr_release_commit} "
          f"--pretty=format:'%s' > commits.txt"),
         shell=True)
     # Generate command
     cmd = []
     cmd.append((f"git log {prev_release_commit}..{curr_release_commit} "
-                f"--pretty=format:'%s' "
-                f" | grep -Eo '#(\d+)'"))
+                f"--pretty=format:\"%s\" "
+                f" | grep -Eo \"#(\d+)\""))
     joined = " && ".join(cmd)
     cmd = f"bash -c '{joined}'"
     cmd = shlex.split(cmd)
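
The quoting change here matters because the assembled pipeline is later wrapped in single quotes for bash -c, so single quotes inside the f-string would prematurely end that outer quoting. A rough standalone sketch of the resulting invocation (the tag names below are invented for illustration):

import shlex
from subprocess import check_output

# Hypothetical release tags, purely for illustration.
prev_release_commit = "releases/0.7.6"
curr_release_commit = "releases/0.7.7"

cmd = []
cmd.append((f"git log {prev_release_commit}..{curr_release_commit} "
            f"--pretty=format:\"%s\" "
            f" | grep -Eo \"#(\\d+)\""))
joined = " && ".join(cmd)
cmd = shlex.split(f"bash -c '{joined}'")
# The inner double quotes stay literal inside the single-quoted bash -c
# argument, so check_output(cmd) would return the referenced PR numbers,
# one "#NNNN" per line of output.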

@@ -329,18 +329,6 @@ class RolloutWorker(EvaluatorInterface):
                                                        tf.executing_eagerly()):
             if not tf:
                 raise ImportError("Could not import tensorflow")
-            if (ray.is_initialized()
-                    and ray.worker._mode() != ray.worker.LOCAL_MODE):
-                if not ray.get_gpu_ids():
-                    logger.debug(
-                        "Creating policy evaluation worker {}".format(
-                            worker_index) +
-                        " on CPU (please ignore any CUDA init errors)")
-                elif not tf.test.is_gpu_available():
-                    raise RuntimeError(
-                        "GPUs were assigned to this worker by Ray, but "
-                        "TensorFlow reports GPU acceleration is disabled. "
-                        "This could be due to a bad CUDA or TF installation.")
             with tf.Graph().as_default():
                 if tf_session_creator:
                     self.tf_sess = tf_session_creator()
@@ -354,6 +342,18 @@ class RolloutWorker(EvaluatorInterface):
                         tf.set_random_seed(seed)
                     self.policy_map, self.preprocessors = \
                         self._build_policy_map(policy_dict, policy_config)
+            if (ray.is_initialized()
+                    and ray.worker._mode() != ray.worker.LOCAL_MODE):
+                if not ray.get_gpu_ids():
+                    logger.debug(
+                        "Creating policy evaluation worker {}".format(
+                            worker_index) +
+                        " on CPU (please ignore any CUDA init errors)")
+                elif not tf.test.is_gpu_available():
+                    raise RuntimeError(
+                        "GPUs were assigned to this worker by Ray, but "
+                        "TensorFlow reports GPU acceleration is disabled. "
+                        "This could be due to a bad CUDA or TF installation.")
         else:
             self.policy_map, self.preprocessors = self._build_policy_map(
                 policy_dict, policy_config)
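
Putting the two hunks together, the post-change flow in RolloutWorker's constructor is roughly the following (a condensed paraphrase, not the verbatim source; the non-creator session branch and the error messages are abbreviated):

# Condensed paraphrase of the reordered initialization (indentation flattened).
if _has_tensorflow_graph(policy_dict) and not (tf and tf.executing_eagerly()):
    if not tf:
        raise ImportError("Could not import tensorflow")
    with tf.Graph().as_default():
        self.tf_sess = (tf_session_creator()
                        if tf_session_creator else tf.Session())
        with self.tf_sess.as_default():
            if seed is not None:
                tf.set_random_seed(seed)
            self.policy_map, self.preprocessors = \
                self._build_policy_map(policy_dict, policy_config)
    # The GPU sanity check is only reached once the session exists.
    if (ray.is_initialized()
            and ray.worker._mode() != ray.worker.LOCAL_MODE):
        if not ray.get_gpu_ids():
            logger.debug("CPU-only worker; any CUDA init errors can be ignored")
        elif not tf.test.is_gpu_available():
            raise RuntimeError("GPUs were assigned by Ray, but TensorFlow "
                               "reports GPU acceleration is disabled.")
else:
    self.policy_map, self.preprocessors = self._build_policy_map(
        policy_dict, policy_config)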