[Tune] Fix HEBO evaluated rewards for max mode & save/restore (#14427)

* Fix HEBO evaluated rewards for max mode

* Lint

* Make sure everything necessary is saved
This commit is contained in:
Antoni Baum 2021-03-03 09:44:43 +01:00 committed by GitHub
parent 63c2b7356e
commit 85a092c3d7
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23

View file

@@ -209,7 +209,7 @@ class HEBOSearch(Searcher):
if self._evaluated_rewards:
self._opt.observe(
pd.DataFrame(self._points_to_evaluate),
np.array(self._evaluated_rewards))
np.array(self._evaluated_rewards) * self._metric_op)
else:
self._initial_points = self._points_to_evaluate
@@ -280,14 +280,18 @@ class HEBOSearch(Searcher):
numpy_random_state = None
torch_random_state = None
with open(checkpoint_path, "wb") as f:
pickle.dump((self._opt, self._points_to_evaluate,
numpy_random_state, torch_random_state), f)
pickle.dump((self._opt, self._initial_points, numpy_random_state,
torch_random_state, self._live_trial_mapping,
self._n_suggestions, self._suggestions_cache,
self._space, self._hebo_config), f)
def restore(self, checkpoint_path: str):
"""Restoring current optimizer state."""
with open(checkpoint_path, "rb") as f:
(self._opt, self._points_to_evaluate, numpy_random_state,
torch_random_state) = pickle.load(f)
(self._opt, self._initial_points, numpy_random_state,
torch_random_state, self._live_trial_mapping, self._n_suggestions,
self._suggestions_cache, self._space,
self._hebo_config) = pickle.load(f)
if numpy_random_state is not None:
np.random.set_state(numpy_random_state)
if torch_random_state is not None: