mirror of https://github.com/vale981/ray (synced 2025-03-06 10:31:39 -05:00)
[tune] enable points_to_evaluate for all search algorithms (#12790)
Co-authored-by: Richard Liaw <rliaw@berkeley.edu>
This commit is contained in: parent fdd85e3af4, commit ea1228074d
12 changed files with 328 additions and 147 deletions
@@ -17,9 +17,9 @@ from ray.tune.suggest.dragonfly import DragonflySearch
 def objective(config):
     for i in range(config["iterations"]):
-        vol1 = config["point"][0]  # LiNO3
-        vol2 = config["point"][1]  # Li2SO4
-        vol3 = config["point"][2]  # NaClO4
+        vol1 = config["LiNO3_vol"]  # LiNO3
+        vol2 = config["Li2SO4_vol"]  # Li2SO4
+        vol3 = config["NaClO4_vol"]  # NaClO4
         vol4 = 10 - (vol1 + vol2 + vol3)  # Water
         # Synthetic functions
         conductivity = vol1 + 0.1 * (vol2 + vol3)**2 + 2.3 * vol4 * (vol1**1.5)
@@ -43,7 +43,18 @@ if __name__ == "__main__":
     #     "activation": ["relu", "tanh"]
     # }

-    previously_run_params = [[10, 0, "relu"], [15, -20, "tanh"]]
+    previously_run_params = [
+        {
+            "width": 10,
+            "height": 0,
+            "activation": "relu"  # Activation will be relu
+        },
+        {
+            "width": 15,
+            "height": -20,
+            "activation": "tanh"  # Activation will be tanh
+        }
+    ]
     known_rewards = [-189, -1144]

     algo = SkOptSearch(
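For reference, a plausible completion of the SkOptSearch construction that the hunk above truncates (not part of this diff; the exact keyword values are an assumption):

algo = SkOptSearch(
    # The dict-based points defined above, evaluated before any
    # model-based suggestions.
    points_to_evaluate=previously_run_params,
    # Their already-known objective values, so they are not re-run.
    evaluated_rewards=known_rewards,
    metric="mean_loss",
    mode="min")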
@@ -1,3 +1,4 @@
+import copy
 from typing import Dict, List, Optional, Union

 from ax.service.ax_client import AxClient
@@ -50,6 +51,11 @@ class AxSearch(Searcher):
             the `ray.tune.result.DEFAULT_METRIC` will be used per default.
         mode (str): One of {min, max}. Determines whether objective is
             minimizing or maximizing the metric attribute. Defaults to "max".
+        points_to_evaluate (list): Initial parameter suggestions to be run
+            first. This is for when you already have some good parameters
+            you want to run first to help the algorithm make better suggestions
+            for future parameters. Needs to be a list of dicts containing the
+            configurations.
         parameter_constraints (list[str]): Parameter constraints, such as
             "x3 >= x4" or "x3 + x4 >= 2".
         outcome_constraints (list[str]): Outcome constraints of form
@@ -110,6 +116,7 @@ class AxSearch(Searcher):
                  space: Optional[Union[Dict, List[Dict]]] = None,
                  metric: Optional[str] = None,
                  mode: Optional[str] = None,
+                 points_to_evaluate: Optional[List[Dict]] = None,
                  parameter_constraints: Optional[List] = None,
                  outcome_constraints: Optional[List] = None,
                  ax_client: Optional[AxClient] = None,
@@ -141,6 +148,8 @@ class AxSearch(Searcher):
         self._parameter_constraints = parameter_constraints
         self._outcome_constraints = outcome_constraints

+        self._points_to_evaluate = copy.deepcopy(points_to_evaluate)
+
         self.max_concurrent = max_concurrent

         self._objective_name = metric
@@ -226,7 +235,13 @@ class AxSearch(Searcher):
         if self.max_concurrent:
             if len(self._live_trial_mapping) >= self.max_concurrent:
                 return None
-        parameters, trial_index = self._ax.get_next_trial()
+
+        if self._points_to_evaluate:
+            config = self._points_to_evaluate.pop(0)
+            parameters, trial_index = self._ax.attach_trial(config)
+        else:
+            parameters, trial_index = self._ax.get_next_trial()
+
         self._live_trial_mapping[trial_id] = trial_index
         return unflatten_dict(parameters)
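Taken together, the ax.py changes let preset configurations be attached as Ax trials before model-based suggestion starts. A minimal usage sketch (not part of this diff; the objective and parameter names are illustrative):

from ray import tune
from ray.tune.suggest.ax import AxSearch

def my_objective(config):
    # Report the value the searcher should maximize.
    tune.report(metric=config["width"] + config["height"])

searcher = AxSearch(
    metric="metric",
    mode="max",
    # Evaluated first, in order, before Ax generates its own trials.
    points_to_evaluate=[
        {"width": 10, "height": 0},
        {"width": 15, "height": -20},
    ])

tune.run(
    my_objective,
    config={
        "width": tune.uniform(0, 20),
        "height": tune.uniform(-100, 100),
    },
    search_alg=searcher,
    num_samples=8)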
@@ -2,7 +2,7 @@ from collections import defaultdict
 import logging
 import pickle
 import json
-from typing import Dict, Optional, Tuple
+from typing import Dict, List, Optional, Tuple

 from ray.tune import ExperimentAnalysis
 from ray.tune.result import DEFAULT_METRIC
@@ -59,6 +59,11 @@ class BayesOptSearch(Searcher):
             per default.
         mode (str): One of {min, max}. Determines whether objective is
             minimizing or maximizing the metric attribute.
+        points_to_evaluate (list): Initial parameter suggestions to be run
+            first. This is for when you already have some good parameters
+            you want to run first to help the algorithm make better suggestions
+            for future parameters. Needs to be a list of dicts containing the
+            configurations.
         utility_kwargs (dict): Parameters to define the utility function.
             The default value is a dictionary with three keys:
             - kind: ucb (Upper Confidence Bound)
@@ -112,6 +117,7 @@ class BayesOptSearch(Searcher):
                  space: Optional[Dict] = None,
                  metric: Optional[str] = None,
                  mode: Optional[str] = None,
+                 points_to_evaluate: Optional[List[Dict]] = None,
                  utility_kwargs: Optional[Dict] = None,
                  random_state: int = 42,
                  random_search_steps: int = 10,
@@ -121,35 +127,6 @@ class BayesOptSearch(Searcher):
                  analysis: Optional[ExperimentAnalysis] = None,
                  max_concurrent: Optional[int] = None,
                  use_early_stopped_trials: Optional[bool] = None):
-        """Instantiate new BayesOptSearch object.
-
-        Args:
-            space (dict): Continuous search space.
-                Parameters will be sampled from
-                this space which will be used to run trials.
-            metric (str): The training result objective value attribute.
-            mode (str): One of {min, max}. Determines whether objective is
-                minimizing or maximizing the metric attribute.
-            utility_kwargs (dict): Parameters to define the utility function.
-                Must provide values for the keys `kind`, `kappa`, and `xi`.
-            random_state (int): Used to initialize BayesOpt.
-            random_search_steps (int): Number of initial random searches.
-                This is necessary to avoid initial local overfitting
-                of the Bayesian process.
-            patience (int): Must be > 0. If the optimizer suggests a set of
-                hyperparameters more than 'patience' times,
-                then the whole experiment will stop.
-            skip_duplicate (bool): If true, BayesOptSearch will not create
-                a trial with a previously seen set of hyperparameters. By
-                default, floating values will be reduced to a digit precision
-                of 5. You can override this by setting
-                ``searcher.repeat_float_precision``.
-            analysis (ExperimentAnalysis): Optionally, the previous analysis
-                to integrate.
-            verbose (int): Sets verbosity level for BayesOpt packages.
-            max_concurrent: Deprecated.
-            use_early_stopped_trials: Deprecated.
-        """
         assert byo is not None, (
             "BayesOpt must be installed!. You can install BayesOpt with"
             " the command: `pip install bayesian-optimization`.")
@@ -183,6 +160,8 @@ class BayesOptSearch(Searcher):
         elif mode == "min":
             self._metric_op = -1.

+        self._points_to_evaluate = points_to_evaluate
+
         self._live_trial_mapping = {}
         self._buffered_trial_results = []
         self.random_search_trials = random_search_steps
@@ -269,8 +248,11 @@ class BayesOptSearch(Searcher):
             # we stop the suggestion and return None.
             return None

-        # We compute the new point to explore
-        config = self.optimizer.suggest(self.utility)
+        if self._points_to_evaluate:
+            config = self._points_to_evaluate.pop(0)
+        else:
+            # We compute the new point to explore
+            config = self.optimizer.suggest(self.utility)

         config_hash = _dict_hash(config, self.repeat_float_precision)
         # Check if already computed
@@ -369,16 +351,16 @@ class BayesOptSearch(Searcher):
     def save(self, checkpoint_path: str):
         """Storing current optimizer state."""
         with open(checkpoint_path, "wb") as f:
-            pickle.dump(
-                (self.optimizer, self._buffered_trial_results,
-                 self._total_random_search_trials, self._config_counter), f)
+            pickle.dump((self.optimizer, self._buffered_trial_results,
+                         self._total_random_search_trials,
+                         self._config_counter, self._points_to_evaluate), f)

     def restore(self, checkpoint_path: str):
         """Restoring current optimizer state."""
         with open(checkpoint_path, "rb") as f:
             (self.optimizer, self._buffered_trial_results,
-             self._total_random_search_trials,
-             self._config_counter) = pickle.load(f)
+             self._total_random_search_trials, self._config_counter,
+             self._points_to_evaluate) = pickle.load(f)

     @staticmethod
     def convert_search_space(spec: Dict, join: bool = False) -> Dict:
@@ -403,7 +385,7 @@ class BayesOptSearch(Searcher):
             logger.warning(
                 "BayesOpt does not support specific sampling methods. "
                 "The {} sampler will be dropped.".format(sampler))
-            return (domain.lower, domain.upper)
+        return (domain.lower, domain.upper)

         raise ValueError("BayesOpt does not support parameters of type "
                          "`{}`".format(type(domain).__name__))
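A corresponding sketch for BayesOptSearch (not part of this diff; names are illustrative). BayesOpt only handles continuous parameters, so each queued point should contain float values only:

from ray import tune
from ray.tune.suggest.bayesopt import BayesOptSearch

def my_objective(config):
    tune.report(loss=(config["x"] - 1.0) ** 2 + config["y"] ** 2)

searcher = BayesOptSearch(
    metric="loss",
    mode="min",
    # Popped in order before the optimizer's own Bayesian proposals.
    points_to_evaluate=[{"x": 1.0, "y": 0.0}, {"x": 0.5, "y": 0.5}])

tune.run(
    my_objective,
    config={"x": tune.uniform(-2.0, 2.0), "y": tune.uniform(-2.0, 2.0)},
    search_alg=searcher,
    num_samples=8)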
@@ -3,7 +3,7 @@
 import copy
 import logging
 import math
-from typing import Dict, Optional, Union
+from typing import Dict, List, Optional, Union

 import ConfigSpace
 from ray.tune.result import DEFAULT_METRIC
@@ -51,6 +51,11 @@ class TuneBOHB(Searcher):
             per default.
         mode (str): One of {min, max}. Determines whether objective is
             minimizing or maximizing the metric attribute.
+        points_to_evaluate (list): Initial parameter suggestions to be run
+            first. This is for when you already have some good parameters
+            you want to run first to help the algorithm make better suggestions
+            for future parameters. Needs to be a list of dicts containing the
+            configurations.
         seed (int): Optional random seed to initialize the random number
             generator. Setting this should lead to identical initial
             configurations at each run.
@@ -107,6 +112,7 @@ class TuneBOHB(Searcher):
                  max_concurrent: int = 10,
                  metric: Optional[str] = None,
                  mode: Optional[str] = None,
+                 points_to_evaluate: Optional[List[Dict]] = None,
                  seed: Optional[int] = None):
         from hpbandster.optimizers.config_generators.bohb import BOHB
         assert BOHB is not None, """HpBandSter must be installed!
@@ -133,6 +139,8 @@ class TuneBOHB(Searcher):
         self._space = space
         self._seed = seed

+        self._points_to_evaluate = points_to_evaluate
+
         super(TuneBOHB, self).__init__(metric=self._metric, mode=mode)

         if self._space:
@@ -185,8 +193,11 @@ class TuneBOHB(Searcher):
                     mode=self._mode))

         if len(self.running) < self._max_concurrent:
-            # This parameter is not used in hpbandster implementation.
-            config, info = self.bohber.get_config(None)
+            if self._points_to_evaluate:
+                config = self._points_to_evaluate.pop(0)
+            else:
+                # This parameter is not used in hpbandster implementation.
+                config, info = self.bohber.get_config(None)
             self.trial_to_params[trial_id] = copy.deepcopy(config)
             self.running.add(trial_id)
             return unflatten_dict(config)
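For TuneBOHB the queued dicts are popped before bohber.get_config is consulted, so enabling the feature is just one extra constructor argument (a sketch, not part of this diff; values are illustrative):

from ray.tune.suggest.bohb import TuneBOHB

searcher = TuneBOHB(
    metric="loss",
    mode="min",
    # Returned as the first suggestions, before BOHB's own sampling.
    points_to_evaluate=[{"width": 10, "height": 0}])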
@@ -12,7 +12,7 @@ from ray.tune.sample import Domain, Float, Quantized
 from ray.tune.suggest.suggestion import UNRESOLVED_SEARCH_SPACE, \
     UNDEFINED_METRIC_MODE, UNDEFINED_SEARCH_SPACE
 from ray.tune.suggest.variant_generator import parse_spec_vars
-from ray.tune.utils.util import flatten_dict, is_nan_or_inf
+from ray.tune.utils.util import flatten_dict, is_nan_or_inf, unflatten_dict

 try:  # Python 3 only -- needed for lint test.
     import dragonfly
@@ -68,11 +68,11 @@ class DragonflySearch(Searcher):
             per default.
         mode (str): One of {min, max}. Determines whether objective is
             minimizing or maximizing the metric attribute.
-        points_to_evaluate (list of lists): A list of points you'd like to run
-            first before sampling from the optimiser, e.g. these could be
-            parameter configurations you already know work well to help
-            the optimiser select good values. Each point is a list of the
-            parameters using the order definition given by parameter_names.
+        points_to_evaluate (list): Initial parameter suggestions to be run
+            first. This is for when you already have some good parameters
+            you want to run first to help the algorithm make better suggestions
+            for future parameters. Needs to be a list of dicts containing the
+            configurations.
         evaluated_rewards (list): If you have previously evaluated the
             parameters passed in as points_to_evaluate you can avoid
             re-running those trials by passing in the reward attributes
@@ -142,7 +142,7 @@ class DragonflySearch(Searcher):
                  space: Optional[Union[Dict, List[Dict]]] = None,
                  metric: Optional[str] = None,
                  mode: Optional[str] = None,
-                 points_to_evaluate: Optional[List[List]] = None,
+                 points_to_evaluate: Optional[List[Dict]] = None,
                  evaluated_rewards: Optional[List] = None,
                  **kwargs):
         assert dragonfly is not None, """dragonfly must be installed!
@@ -170,6 +170,7 @@ class DragonflySearch(Searcher):
         self._evaluated_rewards = evaluated_rewards
         self._initial_points = []
         self._live_trial_mapping = {}
+        self._point_parameter_names = []

         self._opt = None
         if isinstance(optimizer, BlackboxOptimiser):
@@ -206,6 +207,8 @@ class DragonflySearch(Searcher):
                 "You have to set a `domain` when initializing dragonfly. "
                 "Choose one of [Cartesian, Euclidean].")

+        self._point_parameter_names = [param["name"] for param in self._space]
+
         if self._domain.lower().startswith("cartesian"):
             function_caller_cls = CPFunctionCaller
         elif self._domain.lower().startswith("euclidean"):
@@ -250,12 +253,18 @@ class DragonflySearch(Searcher):
         self.init_dragonfly()

     def init_dragonfly(self):
+        if self._points_to_evaluate:
+            points_to_evaluate = [[
+                config[par] for par in self._point_parameter_names
+            ] for config in self._points_to_evaluate]
+        else:
+            points_to_evaluate = None
+
         self._opt.initialise()
-        if self._points_to_evaluate and self._evaluated_rewards:
-            self._opt.tell([(self._points_to_evaluate,
-                             self._evaluated_rewards)])
-        elif self._points_to_evaluate:
-            self._initial_points = self._points_to_evaluate
+        if points_to_evaluate and self._evaluated_rewards:
+            self._opt.tell([(points_to_evaluate, self._evaluated_rewards)])
+        elif points_to_evaluate:
+            self._initial_points = points_to_evaluate
         # Dragonfly internally maximizes, so "min" => -1
         if self._mode == "min":
             self._metric_op = -1.
@@ -306,7 +315,11 @@ class DragonflySearch(Searcher):
                 "parallelism in the experiment: %s", str(exc))
             return None
         self._live_trial_mapping[trial_id] = suggested_config
-        return {"point": suggested_config}
+
+        config = dict(zip(self._point_parameter_names, suggested_config))
+        # Keep backwards compatibility
+        config.update(point=suggested_config)
+        return unflatten_dict(config)

     def on_trial_complete(self,
                           trial_id: str,
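This is the change that lets the example at the top of the diff read config["LiNO3_vol"] instead of config["point"][0]: suggest() now returns named parameters and keeps the raw vector under a "point" key for backwards compatibility. A small self-contained illustration (the values are made up):

# Shape of a config produced by DragonflySearch.suggest() after this change.
config = {
    "LiNO3_vol": 1.3,
    "Li2SO4_vol": 2.1,
    "NaClO4_vol": 0.7,
    "point": [1.3, 2.1, 0.7],  # kept for backwards compatibility
}
assert config["LiNO3_vol"] == config["point"][0]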
@@ -59,7 +59,7 @@ class HyperOptSearch(Searcher):
         points_to_evaluate (list): Initial parameter suggestions to be run
             first. This is for when you already have some good parameters
             you want to run first to help the algorithm make better suggestions
-            for future parameters. Needs to be a list of dict containing the
+            for future parameters. Needs to be a list of dicts containing the
             configurations.
         n_initial_points (int): number of random evaluations of the
             objective function before starting to aproximate it with
@@ -155,7 +155,7 @@ class HyperOptSearch(Searcher):
         if gamma is not None:
             self.algo = partial(self.algo, gamma=gamma)

-        self._points_to_evaluate = points_to_evaluate
+        self._points_to_evaluate = copy.deepcopy(points_to_evaluate)

         self._live_trial_mapping = {}
         if random_state_seed is None:
@@ -190,7 +190,8 @@ class HyperOptSearch(Searcher):
             for i in range(len(self._points_to_evaluate)):
                 config = self._points_to_evaluate[i]
                 self._convert_categories_to_indices(config)
-
+            # HyperOpt treats initial points as LIFO, reverse to get FIFO
+            self._points_to_evaluate = list(reversed(self._points_to_evaluate))
             self._hpopt_trials = generate_trials_to_calculate(
                 self._points_to_evaluate)
             self._hpopt_trials.refresh()
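The reversal exists because hyperopt consumes queued trials last-in-first-out; reversing the list restores the user's intended order. A tiny standalone illustration of that reasoning (plain Python, independent of hyperopt):

points = ["first", "second", "third"]  # order given by the user
queue = list(reversed(points))         # what a LIFO consumer receives

served = []
while queue:
    served.append(queue.pop())         # LIFO pop takes from the end
assert served == points                # original FIFO order is preserved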
@@ -53,9 +53,9 @@ class NevergradSearch(Searcher):
             minimizing or maximizing the metric attribute.
         points_to_evaluate (list): Initial parameter suggestions to be run
             first. This is for when you already have some good parameters
-            you want hyperopt to run first to help the TPE algorithm
-            make better suggestions for future parameters. Needs to be
-            a list of dict of hyperopt-named variables.
+            you want to run first to help the algorithm make better suggestions
+            for future parameters. Needs to be a list of dicts containing the
+            configurations.
         use_early_stopped_trials: Deprecated.
         max_concurrent: Deprecated.

@@ -113,8 +113,8 @@ class NevergradSearch(Searcher):
                  space: Optional[Union[Dict, Parameter]] = None,
                  metric: Optional[str] = None,
                  mode: Optional[str] = None,
-                 max_concurrent: Optional[int] = None,
+                 points_to_evaluate: Optional[List[Dict]] = None,
+                 max_concurrent: Optional[int] = None,
                  **kwargs):
         assert ng is not None, """Nevergrad must be installed!
             You can install Nevergrad with the command:
@@ -204,6 +204,12 @@ class NevergradSearch(Searcher):
             raise ValueError("len(parameters_names) must match optimizer "
                              "dimension for non-instrumented optimizers")

+        if self._points_to_evaluate:
+            # Nevergrad is LIFO, so we add the points to evaluate in reverse
+            # order.
+            for i in range(len(self._points_to_evaluate) - 1, -1, -1):
+                self._nevergrad_opt.suggest(self._points_to_evaluate[i])
+
     def set_search_properties(self, metric: Optional[str], mode: Optional[str],
                               config: Dict) -> bool:
         if self._nevergrad_opt or self._space:
@@ -235,10 +241,6 @@ class NevergradSearch(Searcher):
             if len(self._live_trial_mapping) >= self.max_concurrent:
                 return None

-        if self._points_to_evaluate is not None:
-            if len(self._points_to_evaluate) > 0:
-                point_to_evaluate = self._points_to_evaluate.pop(0)
-                self._nevergrad_opt.suggest(point_to_evaluate)
         suggested_config = self._nevergrad_opt.ask()

         self._live_trial_mapping[trial_id] = suggested_config
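With the initial points now queued on the optimizer up front (see the reversed suggest() loop added above), Nevergrad is configured like the other searchers. A sketch mirroring the test that this diff folds into the shared helper further down:

import nevergrad as ng
from ray.tune.suggest.nevergrad import NevergradSearch

searcher = NevergradSearch(
    optimizer=ng.optimizers.OnePlusOne,
    metric="metric",
    mode="max",
    points_to_evaluate=[
        {"metric": 1, "a": "t1", "b": 1, "c": 1e-1},
        {"metric": 2, "a": "t2", "b": 2, "c": 1e-2},
    ])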
@@ -62,6 +62,11 @@ class OptunaSearch(Searcher):
             per default.
         mode (str): One of {min, max}. Determines whether objective is
             minimizing or maximizing the metric attribute.
+        points_to_evaluate (list): Initial parameter suggestions to be run
+            first. This is for when you already have some good parameters
+            you want to run first to help the algorithm make better suggestions
+            for future parameters. Needs to be a list of dicts containing the
+            configurations.
         sampler (optuna.samplers.BaseSampler): Optuna sampler used to
             draw hyperparameter configurations. Defaults to ``TPESampler``.
@@ -109,6 +114,7 @@ class OptunaSearch(Searcher):
                  space: Optional[Union[Dict, List[Tuple]]] = None,
                  metric: Optional[str] = None,
                  mode: Optional[str] = None,
+                 points_to_evaluate: Optional[List[Dict]] = None,
                  sampler: Optional[BaseSampler] = None):
         assert ot is not None, (
             "Optuna must be installed! Run `pip install optuna`.")
@@ -128,6 +134,8 @@ class OptunaSearch(Searcher):

         self._space = space

+        self._points_to_evaluate = points_to_evaluate
+
         self._study_name = "optuna"  # Fixed study name for in-memory storage
         self._sampler = sampler or ot.samplers.TPESampler()
         assert isinstance(self._sampler, BaseSampler), \
@@ -188,12 +196,15 @@ class OptunaSearch(Searcher):
                 ot_trial_id)
         ot_trial = self._ot_trials[trial_id]

-        # getattr will fetch the trial.suggest_ function on Optuna trials
-        params = {
-            args[0] if len(args) > 0 else kwargs["name"]: getattr(
-                ot_trial, fn)(*args, **kwargs)
-            for (fn, args, kwargs) in self._space
-        }
+        if self._points_to_evaluate:
+            params = self._points_to_evaluate.pop(0)
+        else:
+            # getattr will fetch the trial.suggest_ function on Optuna trials
+            params = {
+                args[0] if len(args) > 0 else kwargs["name"]: getattr(
+                    ot_trial, fn)(*args, **kwargs)
+                for (fn, args, kwargs) in self._space
+            }
         return unflatten_dict(params)

     def on_trial_result(self, trial_id: str, result: Dict):
@@ -215,7 +226,8 @@ class OptunaSearch(Searcher):

     def save(self, checkpoint_path: str):
         save_object = (self._storage, self._pruner, self._sampler,
-                       self._ot_trials, self._ot_study)
+                       self._ot_trials, self._ot_study,
+                       self._points_to_evaluate)
         with open(checkpoint_path, "wb") as outputFile:
             pickle.dump(save_object, outputFile)
@@ -223,7 +235,8 @@ class OptunaSearch(Searcher):
         with open(checkpoint_path, "rb") as inputFile:
             save_object = pickle.load(inputFile)
         self._storage, self._pruner, self._sampler, \
-            self._ot_trials, self._ot_study = save_object
+            self._ot_trials, self._ot_study, \
+            self._points_to_evaluate = save_object

     @staticmethod
     def convert_search_space(spec: Dict) -> List[Tuple]:
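An Optuna sketch for completeness (not part of this diff; the config shape follows the other examples here). Points are popped from the front of the list, so the first dict becomes the first trial:

from ray import tune
from ray.tune.suggest.optuna import OptunaSearch

def my_objective(config):
    tune.report(loss=config["width"] - config["height"])

searcher = OptunaSearch(
    metric="loss",
    mode="min",
    points_to_evaluate=[{"width": 10, "height": 0}])

tune.run(
    my_objective,
    config={
        "width": tune.uniform(0, 20),
        "height": tune.uniform(-100, 100),
    },
    search_alg=searcher,
    num_samples=4)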
@@ -1,3 +1,4 @@
+import copy
 import logging
 import pickle
 from typing import Dict, List, Optional, Tuple, Union
@@ -21,7 +22,7 @@ logger = logging.getLogger(__name__)


 def _validate_warmstart(parameter_names: List[str],
-                        points_to_evaluate: List[List],
+                        points_to_evaluate: List[Union[List, Dict]],
                         evaluated_rewards: List):
     if points_to_evaluate:
         if not isinstance(points_to_evaluate, list):
@@ -29,10 +30,10 @@ def _validate_warmstart(parameter_names: List[str],
                 "points_to_evaluate expected to be a list, got {}.".format(
                     type(points_to_evaluate)))
         for point in points_to_evaluate:
-            if not isinstance(point, list):
+            if not isinstance(point, (dict, list)):
                 raise TypeError(
-                    "points_to_evaluate expected to include list, got {}.".
-                    format(point))
+                    f"points_to_evaluate expected to include list or dict, "
+                    f"got {point}.")

             if not len(point) == len(parameter_names):
                 raise ValueError("Dim of point {}".format(point) +
@@ -81,11 +82,11 @@ class SkOptSearch(Searcher):
             per default.
         mode (str): One of {min, max}. Determines whether objective is
             minimizing or maximizing the metric attribute.
-        points_to_evaluate (list of lists): A list of points you'd like to run
-            first before sampling from the optimiser, e.g. these could be
-            parameter configurations you already know work well to help
-            the optimiser select good values. Each point is a list of the
-            parameters using the order definition given by parameter_names.
+        points_to_evaluate (list): Initial parameter suggestions to be run
+            first. This is for when you already have some good parameters
+            you want to run first to help the algorithm make better suggestions
+            for future parameters. Needs to be a list of dicts containing the
+            configurations.
         evaluated_rewards (list): If you have previously evaluated the
             parameters passed in as points_to_evaluate you can avoid
             re-running those trials by passing in the reward attributes
@@ -104,7 +105,16 @@ class SkOptSearch(Searcher):
             "height": tune.uniform(-100, 100)
         }

-        current_best_params = [[10, 0], [15, -20]]
+        current_best_params = [
+            {
+                "width": 10,
+                "height": 0,
+            },
+            {
+                "width": 15,
+                "height": -20,
+            }
+        ]

         skopt_search = SkOptSearch(
             metric="mean_loss",
@@ -138,7 +148,7 @@ class SkOptSearch(Searcher):
                  space: Union[List[str], Dict[str, Union[Tuple, List]]] = None,
                  metric: Optional[str] = None,
                  mode: Optional[str] = None,
-                 points_to_evaluate: Optional[List[List]] = None,
+                 points_to_evaluate: Optional[List[Dict]] = None,
                  evaluated_rewards: Optional[List] = None,
                  max_concurrent: Optional[int] = None,
                  use_early_stopped_trials: Optional[bool] = None):
@@ -182,7 +192,8 @@ class SkOptSearch(Searcher):
             self._parameter_names = list(space.keys())
             self._parameter_ranges = space.values()

-        self._points_to_evaluate = points_to_evaluate
+        self._points_to_evaluate = copy.deepcopy(points_to_evaluate)
+
         self._evaluated_rewards = evaluated_rewards

         self._skopt_opt = optimizer
@@ -192,6 +203,16 @@ class SkOptSearch(Searcher):
         self._live_trial_mapping = {}

     def _setup_skopt(self):
+        if self._points_to_evaluate and isinstance(self._points_to_evaluate,
+                                                   list):
+            if isinstance(self._points_to_evaluate[0], list):
+                # Keep backwards compatibility
+                self._points_to_evaluate = [
+                    dict(zip(self._parameter_names, point))
+                    for point in self._points_to_evaluate
+                ]
+            # Else: self._points_to_evaluate is already in correct format
+
         _validate_warmstart(self._parameter_names, self._points_to_evaluate,
                             self._evaluated_rewards)

@@ -204,8 +225,9 @@ class SkOptSearch(Searcher):
             self._skopt_opt = sko.Optimizer(self._parameter_ranges)

         if self._points_to_evaluate and self._evaluated_rewards:
-            self._skopt_opt.tell(self._points_to_evaluate,
-                                 self._evaluated_rewards)
+            skopt_points = [[point[par] for par in self._parameter_names]
+                            for point in self._points_to_evaluate]
+            self._skopt_opt.tell(skopt_points, self._evaluated_rewards)
         elif self._points_to_evaluate:
             self._initial_points = self._points_to_evaluate
         self._parameters = self._parameter_names
@@ -254,12 +276,13 @@ class SkOptSearch(Searcher):
             if len(self._live_trial_mapping) >= self.max_concurrent:
                 return None
         if self._initial_points:
-            suggested_config = self._initial_points[0]
-            del self._initial_points[0]
+            suggested_config = self._initial_points.pop(0)
+            skopt_config = [suggested_config[par] for par in self._parameters]
         else:
-            suggested_config = self._skopt_opt.ask()
-        self._live_trial_mapping[trial_id] = suggested_config
-        return unflatten_dict(dict(zip(self._parameters, suggested_config)))
+            skopt_config = self._skopt_opt.ask()
+            suggested_config = dict(zip(self._parameters, skopt_config))
+        self._live_trial_mapping[trial_id] = skopt_config
+        return unflatten_dict(suggested_config)

     def on_trial_complete(self,
                           trial_id: str,
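Because of the compatibility shim in _setup_skopt above, both the old list-of-lists form and the new dict form are accepted (a sketch, not part of this diff):

from ray.tune.suggest.skopt import SkOptSearch

space = {"width": (0, 20), "height": (-100, 100)}

# New style: dicts keyed by parameter name.
new_style = SkOptSearch(
    space=space, metric="mean_loss", mode="min",
    points_to_evaluate=[{"width": 10, "height": 0}])

# Old style: plain lists in the order of the space keys; converted to
# dicts internally for backwards compatibility.
old_style = SkOptSearch(
    space=space, metric="mean_loss", mode="min",
    points_to_evaluate=[[10, 0]])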
@@ -1,6 +1,6 @@
 import copy
 import logging
-from typing import Dict, Optional, Tuple
+from typing import Dict, List, Optional, Tuple

 import ray
 import ray.cloudpickle as pickle
@@ -11,7 +11,7 @@ from ray.tune.suggest.suggestion import UNRESOLVED_SEARCH_SPACE, \
     UNDEFINED_METRIC_MODE, UNDEFINED_SEARCH_SPACE
 from ray.tune.suggest.variant_generator import parse_spec_vars
 from ray.tune.utils.util import unflatten_dict
-from zoopt import ValueType
+from zoopt import Solution, ValueType

 try:
     import zoopt
@@ -119,6 +119,11 @@ class ZOOptSearch(Searcher):
             per default.
         mode (str): One of {min, max}. Determines whether objective is
             minimizing or maximizing the metric attribute.
+        points_to_evaluate (list): Initial parameter suggestions to be run
+            first. This is for when you already have some good parameters
+            you want to run first to help the algorithm make better suggestions
+            for future parameters. Needs to be a list of dicts containing the
+            configurations.
         parallel_num (int): How many workers to parallel. Note that initial
             phase may start less workers than this number. More details can
             be found in zoopt package.
@@ -132,6 +137,7 @@ class ZOOptSearch(Searcher):
                  dim_dict: Optional[Dict] = None,
                  metric: Optional[str] = None,
                  mode: Optional[str] = None,
+                 points_to_evaluate: Optional[List[Dict]] = None,
                  **kwargs):
         assert zoopt is not None, "ZOOpt not found - please install zoopt " \
                                   "by `pip install -U zoopt`."
@@ -160,6 +166,9 @@ class ZOOptSearch(Searcher):
             self._metric_op = -1.
         elif mode == "min":
             self._metric_op = 1.
+
+        self._points_to_evaluate = copy.deepcopy(points_to_evaluate)
+
         self._live_trial_mapping = {}

         self._dim_keys = []
@@ -184,12 +193,22 @@ class ZOOptSearch(Searcher):
             self._dim_keys.append(k)
             _dim_list.append(self._dim_dict[k])

+        init_samples = None
+        if self._points_to_evaluate:
+            logger.warning(
+                "`points_to_evaluate` seems to be ignored by ZOOpt.")
+            init_samples = [
+                Solution(x=tuple(point[dim] for dim in self._dim_keys))
+                for point in self._points_to_evaluate
+            ]
         dim = zoopt.Dimension2(_dim_list)
-        par = zoopt.Parameter(budget=self._budget)
+        par = zoopt.Parameter(budget=self._budget, init_samples=init_samples)
         if self._algo == "sracos" or self._algo == "asracos":
             from zoopt.algos.opt_algorithms.racos.sracos import SRacosTune
             self.optimizer = SRacosTune(
                 dimension=dim, parameter=par, **self.kwargs)
+            if init_samples:
+                self.optimizer.init_attribute()

     def set_search_properties(self, metric: Optional[str], mode: Optional[str],
                               config: Dict) -> bool:
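For ZOOpt the queued dicts are turned into zoopt Solution objects and handed to the Parameter as init_samples; note the warning in the hunk above and the skipped test below, since ZOOpt currently appears to ignore them. A construction sketch (not part of this diff; budget and dimension values are assumptions):

from ray.tune.suggest.zoopt import ZOOptSearch
from zoopt import ValueType

dim_dict = {
    "width": (ValueType.CONTINUOUS, [0, 20], 1e-2),
    "height": (ValueType.CONTINUOUS, [-100, 100], 1e-2),
}

searcher = ZOOptSearch(
    algo="asracos",
    budget=20,
    dim_dict=dim_dict,
    metric="loss",
    mode="min",
    points_to_evaluate=[{"width": 10, "height": 0}])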
@@ -253,10 +253,14 @@ class SearchSpaceTest(unittest.TestCase):
         self.assertLess(config1["b"]["z"], 1e-2)

         searcher = BayesOptSearch()

         invalid_config = {"a/b": tune.uniform(4.0, 8.0)}

         with self.assertRaises(ValueError):
             searcher.set_search_properties("none", "max", invalid_config)

+        invalid_config = {"a": {"b/c": tune.uniform(4.0, 8.0)}}
+
+        with self.assertRaises(ValueError):
+            searcher.set_search_properties("none", "max", invalid_config)

@@ -373,7 +377,7 @@ class SearchSpaceTest(unittest.TestCase):
         config2 = searcher2.suggest("0")

         self.assertEqual(config1, config2)
-        self.assertLess(config2["point"], 1e-2)
+        self.assertLess(config2["b"]["z"], 1e-2)

         searcher = DragonflySearch()
         invalid_config = {"a/b": tune.uniform(4.0, 8.0)}
@@ -388,7 +392,7 @@ class SearchSpaceTest(unittest.TestCase):
         analysis = tune.run(
             _mock_objective, config=config, search_alg=searcher, num_samples=1)
         trial = analysis.trials[0]
-        self.assertLess(trial.config["point"], 1e-2)
+        self.assertLess(trial.config["b"]["z"], 1e-2)

         mixed_config = {
             "a": tune.uniform(5, 6),
@@ -402,8 +406,8 @@ class SearchSpaceTest(unittest.TestCase):
             mode="max")
         config = searcher.suggest("0")

-        self.assertTrue(5 <= config["point"][0] <= 6)
-        self.assertTrue(8 <= config["point"][1] <= 9)
+        self.assertTrue(5 <= config["a"] <= 6)
+        self.assertTrue(8 <= config["b"] <= 9)

     def testConvertHyperOpt(self):
         from ray.tune.suggest.hyperopt import HyperOptSearch
@@ -567,49 +571,6 @@ class SearchSpaceTest(unittest.TestCase):
         self.assertTrue(5 <= config["a"] <= 6)
         self.assertTrue(8 <= config["b"] <= 9)

-    def testNevergradBestParams(self):
-        from ray.tune.suggest.nevergrad import NevergradSearch
-        import nevergrad as ng
-
-        config = {
-            "metric": tune.sample.Categorical([1, 2, 3, 4]).uniform(),
-            "a": tune.sample.Categorical(["t1", "t2", "t3", "t4"]).uniform(),
-            "b": tune.sample.Integer(0, 5),
-            "c": tune.sample.Float(1e-4, 1e-1).loguniform()
-        }
-
-        best_params = [{
-            "metric": 1,
-            "a": "t1",
-            "b": 1,
-            "c": 1e-1
-        }, {
-            "metric": 2,
-            "a": "t2",
-            "b": 2,
-            "c": 1e-2
-        }]
-
-        searcher = NevergradSearch(
-            optimizer=ng.optimizers.OnePlusOne, points_to_evaluate=best_params)
-        analysis = tune.run(
-            _mock_objective,
-            config=config,
-            metric="metric",
-            mode="max",
-            search_alg=searcher,
-            num_samples=5)
-
-        for i in range(len(best_params)):
-            trial_config = analysis.trials[i].config
-            trial_config_dict = {
-                "metric": trial_config["metric"],
-                "a": trial_config["a"],
-                "b": trial_config["b"],
-                "c": trial_config["c"]
-            }
-            self.assertDictEqual(trial_config_dict, best_params[i])
-
     def testConvertOptuna(self):
         from ray.tune.suggest.optuna import OptunaSearch, param
         from optuna.samplers import RandomSampler
@@ -780,6 +741,136 @@ class SearchSpaceTest(unittest.TestCase):
         self.assertTrue(5 <= config["a"] <= 6)
         self.assertTrue(8 <= config["b"] <= 9)

+    def _testPointsToEvaluate(self, cls, config, **kwargs):
+        points_to_evaluate = [{k: v.sample()
+                               for k, v in config.items()} for _ in range(2)]
+        print(f"Points to evaluate: {points_to_evaluate}")
+        searcher = cls(points_to_evaluate=points_to_evaluate, **kwargs)
+
+        analysis = tune.run(
+            _mock_objective,
+            config=config,
+            metric="metric",
+            mode="max",
+            search_alg=searcher,
+            num_samples=5)
+
+        for i in range(len(points_to_evaluate)):
+            trial_config = analysis.trials[i].config
+            trial_config_dict = {
+                "metric": trial_config["metric"],
+                "a": trial_config["a"],
+                "b": trial_config["b"],
+                "c": trial_config["c"]
+            }
+            self.assertDictEqual(trial_config_dict, points_to_evaluate[i])
+
+    def testPointsToEvaluateAx(self):
+        config = {
+            "metric": tune.sample.Categorical([1, 2, 3, 4]).uniform(),
+            "a": tune.sample.Categorical(["t1", "t2", "t3", "t4"]).uniform(),
+            "b": tune.sample.Integer(0, 5),
+            "c": tune.sample.Float(1e-4, 1e-1).loguniform()
+        }
+
+        from ray.tune.suggest.ax import AxSearch
+        return self._testPointsToEvaluate(AxSearch, config)
+
+    def testPointsToEvaluateBayesOpt(self):
+        config = {
+            "metric": tune.sample.Float(10, 20).uniform(),
+            "a": tune.sample.Float(-30, -20).uniform(),
+            "b": tune.sample.Float(0, 5),
+            "c": tune.sample.Float(1e-4, 1e-1).loguniform()
+        }
+
+        from ray.tune.suggest.bayesopt import BayesOptSearch
+        return self._testPointsToEvaluate(BayesOptSearch, config)
+
+    def testPointsToEvaluateBOHB(self):
+        config = {
+            "metric": tune.sample.Categorical([1, 2, 3, 4]).uniform(),
+            "a": tune.sample.Categorical(["t1", "t2", "t3", "t4"]).uniform(),
+            "b": tune.sample.Integer(0, 5),
+            "c": tune.sample.Float(1e-4, 1e-1).loguniform()
+        }
+
+        from ray.tune.suggest.bohb import TuneBOHB
+        return self._testPointsToEvaluate(TuneBOHB, config)
+
+    def testPointsToEvaluateDragonfly(self):
+        config = {
+            "metric": tune.sample.Float(10, 20).uniform(),
+            "a": tune.sample.Float(-30, -20).uniform(),
+            "b": tune.sample.Float(0, 5),
+            "c": tune.sample.Float(1e-4, 1e-1).loguniform()
+        }
+
+        from ray.tune.suggest.dragonfly import DragonflySearch
+        return self._testPointsToEvaluate(
+            DragonflySearch, config, domain="euclidean", optimizer="bandit")
+
+    def testPointsToEvaluateHyperOpt(self):
+        config = {
+            "metric": tune.sample.Categorical([1, 2, 3, 4]).uniform(),
+            "a": tune.sample.Categorical(["t1", "t2", "t3", "t4"]).uniform(),
+            "b": tune.sample.Integer(0, 5),
+            "c": tune.sample.Float(1e-4, 1e-1).loguniform()
+        }
+
+        from ray.tune.suggest.hyperopt import HyperOptSearch
+        return self._testPointsToEvaluate(HyperOptSearch, config)
+
+    def testPointsToEvaluateNevergrad(self):
+        config = {
+            "metric": tune.sample.Categorical([1, 2, 3, 4]).uniform(),
+            "a": tune.sample.Categorical(["t1", "t2", "t3", "t4"]).uniform(),
+            "b": tune.sample.Integer(0, 5),
+            "c": tune.sample.Float(1e-4, 1e-1).loguniform()
+        }
+
+        from ray.tune.suggest.nevergrad import NevergradSearch
+        import nevergrad as ng
+        return self._testPointsToEvaluate(
+            NevergradSearch, config, optimizer=ng.optimizers.OnePlusOne)
+
+    def testPointsToEvaluateOptuna(self):
+        config = {
+            "metric": tune.sample.Categorical([1, 2, 3, 4]).uniform(),
+            "a": tune.sample.Categorical(["t1", "t2", "t3", "t4"]).uniform(),
+            "b": tune.sample.Integer(0, 5),
+            "c": tune.sample.Float(1e-4, 1e-1).loguniform()
+        }
+
+        from ray.tune.suggest.optuna import OptunaSearch
+        return self._testPointsToEvaluate(OptunaSearch, config)
+
+    def testPointsToEvaluateSkOpt(self):
+        config = {
+            "metric": tune.sample.Categorical([1, 2, 3, 4]).uniform(),
+            "a": tune.sample.Categorical(["t1", "t2", "t3", "t4"]).uniform(),
+            "b": tune.sample.Integer(0, 5),
+            "c": tune.sample.Float(1e-4, 1e-1).loguniform()
+        }
+
+        from ray.tune.suggest.skopt import SkOptSearch
+        return self._testPointsToEvaluate(SkOptSearch, config)
+
+    def testPointsToEvaluateZoOpt(self):
+        # https://github.com/polixir/ZOOpt/issues/5
+        self.skipTest("ZoOpt currently ignores initial points. This test "
+                      "will be enabled after this has been fixed.")
+        config = {
+            "metric": tune.sample.Categorical([1, 2, 3, 4]).uniform(),
+            "a": tune.sample.Categorical(["t1", "t2", "t3", "t4"]).uniform(),
+            "b": tune.sample.Integer(0, 5),
+            "c": tune.sample.Float(1e-4, 1e-1).uniform()
+        }
+
+        from ray.tune.suggest.zoopt import ZOOptSearch
+        return self._testPointsToEvaluate(
+            ZOOptSearch, config, budget=10, parallel_num=8)
+
+
 if __name__ == "__main__":
     import pytest