From 4ea7acbf5b3dc3e2a599d0ada35f8b7712a48368 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Fri, 18 Apr 2025 02:03:25 +0200 Subject: [PATCH 001/156] Initial code for the new pipeline space --- neps/optimizers/__init__.py | 3 +- neps/space/new_space/__init__.py | 0 neps/space/new_space/config_string.py | 234 +++ neps/space/new_space/space.py | 1339 +++++++++++++++++ .../new_space/tests/test_neps_integration.py | 334 ++++ .../tests/test_search_space__fidelity.py | 110 ++ .../tests/test_search_space__hnas_like.py | 299 ++++ .../tests/test_search_space__recursion.py | 90 ++ .../tests/test_search_space__resampled.py | 274 ++++ .../test_search_space__reuse_arch_elements.py | 387 +++++ neps/space/new_space/tests/utils.py | 24 + neps/space/parsing.py | 6 +- pyproject.toml | 1 + 13 files changed, 3099 insertions(+), 2 deletions(-) create mode 100644 neps/space/new_space/__init__.py create mode 100644 neps/space/new_space/config_string.py create mode 100644 neps/space/new_space/space.py create mode 100644 neps/space/new_space/tests/test_neps_integration.py create mode 100644 neps/space/new_space/tests/test_search_space__fidelity.py create mode 100644 neps/space/new_space/tests/test_search_space__hnas_like.py create mode 100644 neps/space/new_space/tests/test_search_space__recursion.py create mode 100644 neps/space/new_space/tests/test_search_space__resampled.py create mode 100644 neps/space/new_space/tests/test_search_space__reuse_arch_elements.py create mode 100644 neps/space/new_space/tests/utils.py diff --git a/neps/optimizers/__init__.py b/neps/optimizers/__init__.py index 9b97790a9..9b2e7f34e 100644 --- a/neps/optimizers/__init__.py +++ b/neps/optimizers/__init__.py @@ -13,6 +13,7 @@ from neps.utils.common import extract_keyword_defaults if TYPE_CHECKING: + from neps.space.new_space.space import Pipeline from neps.space import SearchSpace @@ -51,7 +52,7 @@ def load_optimizer( | CustomOptimizer | Literal["auto"] ), - space: 
from __future__ import annotations

import dataclasses
import functools


@dataclasses.dataclass(frozen=True)
class UnwrappedConfigStringPart:
    """A single parenthesised element of an unwrapped config string."""

    level: int  # nesting depth; the outermost parentheses are level 1
    opening_index: int  # order of appearance of this element's '(' in the string
    operator: str  # operator name: the first token inside the parentheses
    hyperparameters: str  # the '{...}' hyperparameter text, '{}' when absent
    operands: str  # the trailing '(...)' operand text, '' when absent


# A workaround needed since in the existing configurations generated by
# previous methods, e.g. the `resBlock resBlock` and `resBlock` items occur
# without wrapping parentheses, differently from other items.
# The bool flag says whether the single (non-doubled) form must also be
# wrapped: 'id' comes in two forms, 'id id' and 'Ops id', and only the
# 'id id' variant should be replaced.
_LEGACY_REPLACEMENTS: tuple[tuple[str, bool], ...] = (
    ("resBlock", True),
    ("id", False),
)


@functools.lru_cache(maxsize=2000)
def unwrap_config_string(config_string: str) -> tuple[UnwrappedConfigStringPart, ...]:
    """Split a parenthesised config string into structured parts.

    First unwraps the given parenthesised `config_string` into parts, then
    converts these parts into `UnwrappedConfigStringPart` objects, returned
    in order of their opening parenthesis.
    """
    # Normalise the legacy un-parenthesised items (see `_LEGACY_REPLACEMENTS`).
    for op, replace_individual in _LEGACY_REPLACEMENTS:
        config_string = config_string.replace(f"{op} {op}", "__TMP_PLACEHOLDER___")
        if replace_individual:
            config_string = config_string.replace(f"{op}", f"({op})")
        config_string = config_string.replace("__TMP_PLACEHOLDER___", f"({op} {op})")

    result = []
    stack: list[tuple[int, int]] = []
    opening_counter = 0
    for current_char_index, current_char in enumerate(config_string):
        if current_char == "(":
            stack.append((current_char_index, opening_counter))
            opening_counter += 1
        elif current_char == ")":
            assert stack, f"Found ')' with no matching '('. Index: {current_char_index}"

            start_char_index, opening_index = stack.pop()
            level = len(stack) + 1  # start level counting from 1 and not 0

            value = config_string[start_char_index + 1 : current_char_index]
            value = value.split(" (", maxsplit=1)
            operator = value[0]
            operands = "(" + value[1] if len(value) > 1 else ""

            if " {" in operator:
                operator, hyperparameters = operator.split(" {")
                hyperparameters = "{" + hyperparameters
            else:
                hyperparameters = "{}"

            result.append(
                UnwrappedConfigStringPart(
                    level=level,
                    opening_index=opening_index,
                    operator=operator,
                    hyperparameters=hyperparameters,
                    operands=operands,
                )
            )

    assert not stack, f"For '(' found no matching ')': Index: {stack[0][0]}"
    return tuple(sorted(result, key=lambda x: x.opening_index))


# Current profiling shows this function does not run that often
# so no need for caching
def wrap_config_into_string(
    unwrapped_config: tuple[UnwrappedConfigStringPart, ...],
    max_level: int | None = None,
) -> str:
    """
    For a given unwrapped config, returns the string representing it.
    :param unwrapped_config: The unwrapped config
    :param max_level:
        An optional int telling which is the maximal considered level.
        Bigger levels are ignored
    """
    result = []
    current_level = 0
    for item in unwrapped_config:
        if max_level is not None and item.level > max_level:
            continue

        if item.level > current_level:
            if item.hyperparameters not in ("{}", ""):
                value = " (" + str(item.operator) + " " + item.hyperparameters
            else:
                value = " (" + str(item.operator)
        elif item.level < current_level:
            value = ")" * (current_level - item.level + 1) + " (" + str(item.operator)
        else:
            value = ") (" + str(item.operator)
        current_level = item.level
        result.append(value)
    result.append(")" * current_level)

    result = "".join(result).strip()

    # Inverse of the legacy normalisation done in `unwrap_config_string`
    # (see `_LEGACY_REPLACEMENTS`).
    for op, replace_individual in _LEGACY_REPLACEMENTS:
        result = result.replace(f"({op} {op})", "__TMP_PLACEHOLDER___")
        if replace_individual:
            result = result.replace(f"({op})", f"{op}")
        result = result.replace("__TMP_PLACEHOLDER___", f"{op} {op}")

    return result


class ConfigString:
    """A parenthesised config string plus lazily computed structured views."""

    def __init__(self, config_string: str) -> None:
        if config_string is None or len(config_string) == 0:
            raise ValueError(f"Invalid config string: {config_string}")
        self.config_string = config_string

        # The fields below are needed for lazy and cached evaluation
        # (could nowadays be `functools.cached_property`).
        self._unwrapped: tuple[UnwrappedConfigStringPart, ...] | None = None
        self._max_hierarchy_level: int | None = None

        # a cache for the different hierarchy levels of this config string
        self._at_hierarchy_level_cache: dict[int, ConfigString] = {}

    @property
    def unwrapped(self) -> tuple[UnwrappedConfigStringPart, ...]:
        """The structured parts of this config string (computed once, cached)."""
        if self._unwrapped is not None:
            return self._unwrapped

        unwrapped = unwrap_config_string(self.config_string)
        if not unwrapped:
            raise ValueError(f"Error unwrapping config string: {self.config_string}")

        # NOTE: slow test that can possibly be removed
        # test that meaning was preserved between wrapping and unwrapping
        # to make sure the config string wrapping/unwrapping is working well
        rewrapped_config = wrap_config_into_string(unwrapped_config=unwrapped)
        assert self.config_string == rewrapped_config, (
            "Error during wrapping unwrapping: config_string != rewrapped_config_string",
            self.config_string,
            rewrapped_config,
        )

        self._unwrapped = unwrapped
        return self._unwrapped

    @property
    def max_hierarchy_level(self) -> int:
        """The deepest nesting level present in this config (computed once)."""
        if self._max_hierarchy_level is not None:
            return self._max_hierarchy_level

        max_hierarchy_level = max(i.level for i in self.unwrapped)
        # Use the local value in the message: re-reading the property here
        # would needlessly recompute it, since the cache is not yet filled.
        assert max_hierarchy_level > 0, (
            f"Invalid max hierarchy level: {max_hierarchy_level}"
        )

        self._max_hierarchy_level = max_hierarchy_level
        return self._max_hierarchy_level

    def at_hierarchy_level(self, level: int) -> ConfigString:
        """
        Get a representation of this config at the chosen hierarchy level.
        :param level:
            When >0, get the config at that hierarchy level.
            When <0, count backwards from `max_hierarchy_level`,
            similar to negative python indices in e.g. lists.
        :return: The config truncated to the requested hierarchy level.
        """
        if level == 0:
            raise ValueError(f"Invalid value for `level`. Received level == 0: {level}")
        if level > self.max_hierarchy_level:
            raise ValueError(
                "Invalid value for `level`. "
                f"level>max_hierarchy_level: {level}>{self.max_hierarchy_level}"
            )
        if level < -self.max_hierarchy_level:
            raise ValueError(
                "Invalid value for `level`. "
                f"level<-max_hierarchy_level: {level}<-{self.max_hierarchy_level}"
            )

        if level < 0:
            # for example for level=-1, when max_hierarchy_level=7, new level is 7
            # for example for level=-3, when max_hierarchy_level=7, new level is 5
            level = self.max_hierarchy_level + (level + 1)

        if level in self._at_hierarchy_level_cache:
            return self._at_hierarchy_level_cache[level]

        config_string_at_hierarchy_level = wrap_config_into_string(
            unwrapped_config=self.unwrapped,
            max_level=level,
        )
        config_at_hierarchy_level = ConfigString(config_string_at_hierarchy_level)
        self._at_hierarchy_level_cache[level] = config_at_hierarchy_level

        return self._at_hierarchy_level_cache[level]

    def pretty_format(self) -> str:
        """An indented, one-part-per-line rendering for debugging."""
        format_str = "{indent}{item.level:0>2d} :: {item.operator} {item.hyperparameters}"
        lines = [self.config_string]
        for item in self.unwrapped:
            lines.append(format_str.format(item=item, indent="\t" * item.level))
        return "\n".join(lines)

    def __eq__(self, other: object) -> bool:
        if isinstance(other, self.__class__):
            return self.config_string == other.config_string
        # BUGFIX: return NotImplemented instead of raising NotImplementedError,
        # so Python falls back to the other operand's comparison — which is
        # what the original comment ("let the other side check for equality")
        # intended. Raising here broke `==`/`!=` against any other type.
        return NotImplemented

    def __ne__(self, other: object) -> bool:
        result = self.__eq__(other)
        return result if result is NotImplemented else not result

    def __hash__(self) -> int:
        return self.config_string.__hash__()
from __future__ import annotations

# NOTE(review): this span of the (mangled) patch also carried the module's
# `neps.*` imports (config_string, optimizer, trial_state, optimizer_state);
# they are unchanged and still belong at the top of `space.py`.
import abc
import enum
import math
import random
from typing import (
    Any,
    Generic,
    Mapping,
    Protocol,
    Sequence,
    TypeVar,
    cast,
    runtime_checkable,
)

T = TypeVar("T")
P = TypeVar("P", bound="Pipeline")


# -------------------------------------------------


class _Unset:
    """Sentinel type for 'no value given' (see the `_UNSET` singleton)."""


_UNSET = _Unset()


# -------------------------------------------------


@runtime_checkable
class Resolvable(Protocol):
    """Structural interface for objects whose attrs can be resolved/rebuilt."""

    def get_attrs(self) -> Mapping[str, Any]:
        raise NotImplementedError()

    def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable:
        raise NotImplementedError()


def resolvable_is_fully_resolved(resolvable: Resolvable) -> bool:
    """True iff no attr of `resolvable` is itself an unresolved Resolvable."""
    for attr_value in resolvable.get_attrs().values():
        if isinstance(attr_value, Resolvable) and not resolvable_is_fully_resolved(attr_value):
            return False
    return True


@runtime_checkable
class DomainSampler(Protocol):
    """Callable that produces a value for a Domain at a given resolution path."""

    def __call__(
        self,
        *,
        domain_obj: Domain[T],
        current_path: str,
    ) -> T:
        raise NotImplementedError()


# -------------------------------------------------


class Pipeline(Resolvable):
    """Base Resolvable: its public class/instance attributes are its attrs."""

    @property
    def fidelity_attrs(self) -> Mapping[str, Fidelity]:
        """The subset of attrs that are Fidelity objects."""
        return {
            name: value
            for name, value in self.get_attrs().items()
            if isinstance(value, Fidelity)
        }

    def get_attrs(self) -> Mapping[str, Any]:
        def _public_items(namespace: Mapping[str, Any]) -> dict[str, Any]:
            # Attributes are the non-callable, non-underscore names.
            return {
                name: value
                for name, value in namespace.items()
                if not name.startswith("_") and not callable(value)
            }

        # Class-level attrs first; instance attrs override them.
        attrs = _public_items(vars(self.__class__))
        attrs.update(_public_items(vars(self)))

        # Properties such as `fidelity_attrs` are views, not attrs.
        attrs.pop("fidelity_attrs", None)
        return attrs

    def from_attrs(self, attrs: Mapping[str, Any]) -> Pipeline:
        resolved_pipeline = Pipeline()
        for attr_name, attr_value in attrs.items():
            setattr(resolved_pipeline, attr_name, attr_value)
        return resolved_pipeline


class ConfidenceLevel(enum.Enum):
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"


class Domain(Resolvable, abc.ABC, Generic[T]):
    """Base class for samplable value domains."""

    @property
    @abc.abstractmethod
    def has_prior(self) -> bool:
        raise NotImplementedError()

    @property
    @abc.abstractmethod
    def prior(self) -> T:
        raise NotImplementedError()

    @property
    @abc.abstractmethod
    def prior_confidence(self) -> ConfidenceLevel:
        raise NotImplementedError()

    @property
    @abc.abstractmethod
    def range_compatibility_identifier(self) -> str:
        raise NotImplementedError()

    @abc.abstractmethod
    def sample(self) -> T:
        raise NotImplementedError()

    def get_attrs(self) -> Mapping[str, Any]:
        # Expose the private fields without their leading underscore.
        return {name.lstrip("_"): value for name, value in vars(self).items()}

    def from_attrs(self, attrs: Mapping[str, Any]) -> Domain[T]:
        return type(self)(**attrs)


class Categorical(Domain[int], Generic[T]):
    """A choice among options; sampling yields an index, not the option itself."""

    def __init__(
        self,
        choices: tuple[T | Domain[T] | Resolvable, ...] | Domain[T],
        prior_index: int | Domain[int] | _Unset = _UNSET,
        prior_confidence: ConfidenceLevel | _Unset = _UNSET,
    ):
        self._choices: tuple[T | Domain[T] | Resolvable, ...] | Domain[T]
        self._choices = tuple(choices) if isinstance(choices, Sequence) else choices
        self._prior_index = prior_index
        self._prior_confidence = prior_confidence

    @property
    def choices(self) -> tuple[T | Domain[T] | Resolvable, ...] | Domain[T]:
        return self._choices

    @property
    def has_prior(self) -> bool:
        # A prior needs both an index and a confidence.
        return not (self._prior_index is _UNSET or self._prior_confidence is _UNSET)

    @property
    def prior(self) -> int:
        if not self.has_prior:
            raise ValueError("Domain has no prior defined.")
        return int(cast(int, self._prior_index))

    @property
    def prior_confidence(self) -> ConfidenceLevel:
        if not self.has_prior:
            raise ValueError("Domain has no prior defined.")
        return cast(ConfidenceLevel, self._prior_confidence)

    @property
    def range_compatibility_identifier(self) -> str:
        # Two categoricals are range-compatible when they have as many choices.
        return f"{len(cast(tuple, self._choices))}"

    def sample(self) -> int:
        # Uniform index into the choices tuple.
        return random.randrange(len(cast(tuple, self._choices)))


class Float(Domain[float]):
    """A float range domain, optionally log-scaled and with a prior value."""

    def __init__(
        self,
        min_value: float,
        max_value: float,
        log: bool = False,
        prior: float | _Unset = _UNSET,
        prior_confidence: ConfidenceLevel | _Unset = _UNSET,
    ):
        self._min_value = min_value
        self._max_value = max_value
        self._log = log
        self._prior = prior
        self._prior_confidence = prior_confidence

    @property
    def min_value(self) -> float:
        return self._min_value

    @property
    def max_value(self) -> float:
        return self._max_value

    @property
    def has_prior(self) -> bool:
        return not (self._prior is _UNSET or self._prior_confidence is _UNSET)

    @property
    def prior(self) -> float:
        if not self.has_prior:
            raise ValueError("Domain has no prior defined.")
        return float(cast(float, self._prior))

    @property
    def prior_confidence(self) -> ConfidenceLevel:
        if not self.has_prior:
            raise ValueError("Domain has no prior defined.")
        return cast(ConfidenceLevel, self._prior_confidence)

    @property
    def range_compatibility_identifier(self) -> str:
        return f"{self._min_value}_{self._max_value}_{self._log}"

    def sample(self) -> float:
        if not self._log:
            return float(random.uniform(self._min_value, self._max_value))
        # Log scale: sample uniformly in log space, then map back.
        log_lo = math.log(self._min_value)
        log_hi = math.log(self._max_value)
        return float(math.exp(random.uniform(log_lo, log_hi)))
class Integer(Domain[int]):
    """An integer range domain, optionally with a prior value."""

    def __init__(
        self,
        min_value: int,
        max_value: int,
        log: bool = False,
        prior: int | _Unset = _UNSET,
        prior_confidence: ConfidenceLevel | _Unset = _UNSET,
    ):
        self._min_value = min_value
        self._max_value = max_value
        self._log = log
        self._prior = prior
        self._prior_confidence = prior_confidence

    @property
    def min_value(self) -> int:
        return self._min_value

    @property
    def max_value(self) -> int:
        return self._max_value

    @property
    def has_prior(self) -> bool:
        # A prior needs both a value and a confidence.
        return not (self._prior is _UNSET or self._prior_confidence is _UNSET)

    @property
    def prior(self) -> int:
        if not self.has_prior:
            raise ValueError("Domain has no prior defined.")
        return int(cast(int, self._prior))

    @property
    def prior_confidence(self) -> ConfidenceLevel:
        if not self.has_prior:
            raise ValueError("Domain has no prior defined.")
        return cast(ConfidenceLevel, self._prior_confidence)

    @property
    def range_compatibility_identifier(self) -> str:
        return f"{self._min_value}_{self._max_value}_{self._log}"

    def sample(self) -> int:
        if self._log:
            raise NotImplementedError("TODO.")
        # Inclusive uniform draw over [min_value, max_value].
        return int(random.randint(self._min_value, self._max_value))


class Operation(Resolvable):
    """An operator (callable or name) plus positional and keyword arguments."""

    def __init__(
        self,
        operator: Callable | str,
        args: Sequence[Any] | None = None,
        kwargs: Mapping[str, Any] | None = None,
    ):
        self._operator = operator
        self._args: tuple[Any, ...] = tuple(args) if args else tuple()
        self._kwargs: Mapping[str, Any] = kwargs if kwargs else {}

    @property
    def operator(self) -> Callable | str:
        return self._operator

    @property
    def args(self) -> tuple[Any, ...]:
        return self._args

    @property
    def kwargs(self) -> Mapping[str, Any]:
        return self._kwargs

    def get_attrs(self) -> Mapping[str, Any]:
        # Flatten the fields into one flat mapping:
        # args -> 'args[0]', 'args[1]', ...; kwargs -> 'kwargs{key}'.
        # TODO: simplify this. We know the fields. Maybe other places too.
        flat: dict[str, Any] = {}
        for field_name, field_value in vars(self).items():
            field_name = field_name.lstrip("_")
            if isinstance(field_value, dict):
                # Multiple {{}} needed to escape surrounding '{' and '}'.
                flat.update(
                    {f"{field_name}{{{key}}}": item for key, item in field_value.items()}
                )
            elif isinstance(field_value, tuple):
                flat.update(
                    {f"{field_name}[{index}]": item for index, item in enumerate(field_value)}
                )
            else:
                flat[field_name] = field_value
        return flat

    def from_attrs(self, attrs: Mapping[str, Any]) -> Operation:
        # Inverse of `get_attrs`: re-nest 'name{key}' and 'name[i]' entries.
        # TODO: simplify this. We know the fields. Maybe other places too.
        unflattened: dict[str, Any] = {}
        for name, value in attrs.items():
            if "{" in name and "}" in name:
                base, key = name.split("{")
                unflattened.setdefault(base, {})[key.rstrip("}")] = value
            elif "[" in name and "]" in name:
                base, index_str = name.split("[")
                unflattened.setdefault(base, []).insert(int(index_str.rstrip("]")), value)
            else:
                unflattened[name] = value
        return type(self)(**unflattened)


class Resampled(Resolvable):
    """Marks that `source` must be re-sampled, either directly or by name."""

    def __init__(self, source: Resolvable | str):
        self._source = source

    @property
    def source(self) -> Resolvable | str:
        return self._source

    @property
    def is_resampling_by_name(self) -> bool:
        return isinstance(self._source, str)

    def get_attrs(self) -> Mapping[str, Any]:
        if self.is_resampling_by_name:
            raise ValueError(
                f"This is a resampling by name, can't get attrs from it: {self.source!r}."
            )
        if not isinstance(self._source, Resolvable):
            raise ValueError(f"Source should be a resolvable object. Is: {self._source!r}.")
        return self._source.get_attrs()

    def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable:
        if self.is_resampling_by_name:
            raise ValueError(
                f"This is a resampling by name, can't create object for it: {self.source!r}."
            )
        if not isinstance(self._source, Resolvable):
            raise ValueError(f"Source should be a resolvable object. Is: {self._source!r}.")
        return self._source.from_attrs(attrs)


class Fidelity(Resolvable, Generic[T]):
    """A fidelity range; never sampled/resolved, and its domain has no prior."""

    def __init__(self, domain: Integer | Float):
        if domain.has_prior:
            raise ValueError(f"The domain of a Fidelity can not have priors: {domain!r}.")
        self._domain = domain

    @property
    def min_value(self) -> int | float:
        return self._domain.min_value

    @property
    def max_value(self) -> int | float:
        return self._domain.max_value

    def get_attrs(self) -> Mapping[str, Any]:
        raise ValueError("For a Fidelity object there is nothing to resolve.")

    def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity:
        raise ValueError("For a Fidelity object there is nothing to resolve.")


# -------------------------------------------------


class OnlyPredefinedValuesSampler(DomainSampler):
    """Only replays predefined values; unknown paths are an error."""

    def __init__(
        self,
        predefined_samplings: Mapping[str, Any],
    ):
        self._predefined_samplings = predefined_samplings

    def __call__(
        self,
        *,
        domain_obj: Domain[T],
        current_path: str,
    ) -> T:
        if current_path not in self._predefined_samplings:
            raise ValueError(f"No predefined value for path: {current_path!r}.")
        return cast(T, self._predefined_samplings[current_path])


class RandomSampler(DomainSampler):
    """Replays predefined values where available, otherwise samples randomly."""

    def __init__(
        self,
        predefined_samplings: Mapping[str, Any],
    ):
        self._predefined_samplings = predefined_samplings

    def __call__(
        self,
        *,
        domain_obj: Domain[T],
        current_path: str,
    ) -> T:
        if current_path in self._predefined_samplings:
            return cast(T, self._predefined_samplings[current_path])
        return domain_obj.sample()


class PriorOrFallbackSampler(DomainSampler):
    """Uses the domain's prior with a given probability, else a fallback sampler."""

    def __init__(
        self,
        fallback_sampler: DomainSampler,
        prior_use_probability: float,
    ):
        if not 0 <= prior_use_probability <= 1:
            raise ValueError(
                f"The given `prior_use_probability` value is out of range: {prior_use_probability!r}."
            )

        self._fallback_sampler = fallback_sampler
        self._prior_use_probability = prior_use_probability

    def __call__(
        self,
        *,
        domain_obj: Domain[T],
        current_path: str,
    ) -> T:
        # NOTE: the coin is flipped before checking `has_prior` so the RNG
        # call sequence matches the original implementation exactly.
        use_prior = random.choices(
            (True, False),
            weights=(self._prior_use_probability, 1 - self._prior_use_probability),
            k=1,
        )[0]
        if domain_obj.has_prior and use_prior:
            return domain_obj.prior
        return self._fallback_sampler(
            domain_obj=domain_obj,
            current_path=current_path,
        )
class MutateByForgettingSampler(DomainSampler):
    """Mutates predefined samplings by forgetting `n_forgets` of them,
    then re-sampling the forgotten paths randomly."""

    def __init__(
        self,
        predefined_samplings: Mapping[str, Any],
        n_forgets: int,
    ):
        if not isinstance(n_forgets, int) or not 0 < n_forgets <= len(predefined_samplings):
            raise ValueError(f"Invalid value for `n_forgets`: {n_forgets!r}.")

        mutated_samplings_to_make = _mutate_samplings_to_make_by_forgetting(
            samplings_to_make=predefined_samplings,
            n_forgets=n_forgets,
        )
        self._random_sampler = RandomSampler(
            predefined_samplings=mutated_samplings_to_make,
        )

    def __call__(
        self,
        *,
        domain_obj: Domain[T],
        current_path: str,
    ) -> T:
        return self._random_sampler(domain_obj=domain_obj, current_path=current_path)


class CrossoverNotPossibleError(Exception):
    pass


class CrossoverByMixingSampler(DomainSampler):
    """Crosses over two sets of predefined samplings by mixing their values."""

    def __init__(
        self,
        predefined_samplings_1: Mapping[str, Any],
        predefined_samplings_2: Mapping[str, Any],
        prefer_first_probability: float,
    ):
        if not isinstance(prefer_first_probability, float) or not (0 <= prefer_first_probability <= 1):
            raise ValueError(
                f"Invalid value for `prefer_first_probability`: {prefer_first_probability!r}."
            )

        made_any_crossovers, crossed_over_samplings_to_make = _crossover_samplings_to_make_by_mixing(
            predefined_samplings_1=predefined_samplings_1,
            predefined_samplings_2=predefined_samplings_2,
            prefer_first_probability=prefer_first_probability,
        )
        if not made_any_crossovers:
            raise CrossoverNotPossibleError("No crossovers were made.")

        self._random_sampler = RandomSampler(
            predefined_samplings=crossed_over_samplings_to_make,
        )

    def __call__(
        self,
        *,
        domain_obj: Domain[T],
        current_path: str,
    ) -> T:
        return self._random_sampler(domain_obj=domain_obj, current_path=current_path)


def _mutate_samplings_to_make_by_forgetting(
    samplings_to_make: Mapping[str, Any],
    n_forgets: int,
) -> Mapping[str, Any]:
    """Return a copy of `samplings_to_make` with `n_forgets` random entries removed."""
    mutated_samplings_to_make = dict(**samplings_to_make)
    for path_to_forget in random.sample(list(samplings_to_make.keys()), k=n_forgets):
        del mutated_samplings_to_make[path_to_forget]
    return mutated_samplings_to_make


def _crossover_samplings_to_make_by_mixing(
    predefined_samplings_1: Mapping[str, Any],
    predefined_samplings_2: Mapping[str, Any],
    prefer_first_probability: float,
) -> tuple[bool, Mapping[str, Any]]:
    """Mix two sampling mappings, returning (made_any_crossovers, mixed_mapping).

    Paths present only in the second mapping are taken over as-is and do not
    count as crossovers; for shared paths a biased coin decides which value
    wins, and picking the second mapping's value counts as a crossover.
    """
    crossed_over_samplings = dict(**predefined_samplings_1)
    made_any_crossovers = False

    for path, sampled_value_in_2 in predefined_samplings_2.items():
        if path not in crossed_over_samplings:
            crossed_over_samplings[path] = sampled_value_in_2
            continue
        use_value_from_2 = random.choices(
            (False, True),
            weights=(prefer_first_probability, 1 - prefer_first_probability),
            k=1,
        )[0]
        if use_value_from_2:
            crossed_over_samplings[path] = sampled_value_in_2
            made_any_crossovers = True

    return made_any_crossovers, crossed_over_samplings


# -------------------------------------------------
Mapping): + raise ValueError(f"The received `environment_values` is not a Mapping: {environment_values!r}.") + + # `_resolution_root` stores the root of the resolution. + self._resolution_root: Resolvable = resolution_root + + # `_domain_sampler` stores the object responsible for sampling from Domain objects. + self._domain_sampler = domain_sampler + + # # `_environment_values` stores fixed values from outside. + # # They are not related to samplings and can not be mutated or similar. + self._environment_values = environment_values + + # `_samplings_made` stores the values we have sampled + # and can be used later in case we want to redo a resolving. + self._samplings_made: dict[str, Any] = {} + + # `_resolved_objects` stores the intermediate values to make re-use possible. + self._resolved_objects: dict[Any, Any] = {} + + # `_current_path_parts` stores the current path we are resolving. + self._current_path_parts: list[str] = [] + + @property + def resolution_root(self) -> Resolvable: + return self._resolution_root + + @property + def samplings_made(self) -> Mapping[str, Any]: + return self._samplings_made + + @property + def environment_values(self) -> Mapping[str, Any]: + return self._environment_values + + @contextlib.contextmanager + def resolving(self, _obj: Any, name: str) -> Generator[None]: + if not name or not isinstance(name, str): + raise ValueError(f"Given name for what we are resolving is invalid: {name!r}.") + + # It is possible that the received object has already been resolved. + # That is expected and is okay, so no check is made for it. + # For example, in the case of a Resampled we can receive the same object again. 
+ + self._current_path_parts.append(name) + try: + yield + finally: + self._current_path_parts.pop() + + def was_already_resolved(self, obj: Any) -> bool: + return obj in self._resolved_objects + + def add_resolved(self, original: Any, resolved: Any) -> None: + if self.was_already_resolved(original): + raise ValueError( + f"Original object has already been resolved: {original!r}. " + + "\nIf you are doing resampling by name, " + + "make sure you are not forgetting to request resampling also for related objects." + + "\nOtherwise it could lead to infinite recursion." + ) + if isinstance(original, Resampled): + raise ValueError(f"Attempting to add a Resampled object to resolved values: {original!r}.") + self._resolved_objects[original] = resolved + + def get_resolved(self, obj: Any) -> Any: + try: + return self._resolved_objects[obj] + except KeyError: + raise ValueError(f"Given object was not already resolved. Please check first: {obj!r}") + + def sample_from(self, domain_obj: Domain) -> Any: + # Each `domain_obj` is only ever sampled from once. + # This is okay and the expected behavior. + # For each `domain_obj`, its sampled value is either directly stored itself, + # or is used in some other Resolvable. + # In both cases that sampled value is cached for later uses, + # and so the `domain_obj` will not be re-sampled from again. + if self.was_already_resolved(domain_obj): + raise ValueError( + f"We have already sampled a value for the given domain object: {domain_obj!r}." + + "\nThis should not be happening." + ) + + # The range compatibility identifier is there to make sure when we say + # the path matches, that the range for the value we are looking up also matches. 
+ domain_obj_type_name = type(domain_obj).__name__.lower() + range_compatibility_identifier = domain_obj.range_compatibility_identifier + domain_obj_identifier = f"{domain_obj_type_name}__{range_compatibility_identifier}" + + current_path = ".".join(self._current_path_parts) + current_path += "::" + domain_obj_identifier + + if current_path in self._samplings_made: + # We have already sampled a value for this path. This should not happen. + # Every time we sample a domain, it should have its own different path. + raise ValueError( + f"We have already sampled a value for the current path: {current_path!r}." + + "\nThis should not be happening." + ) + + sampled_value = self._domain_sampler( + domain_obj=domain_obj, + current_path=current_path, + ) + + self._samplings_made[current_path] = sampled_value + return self._samplings_made[current_path] + + def get_value_from_environment(self, var_name: str) -> Any: + try: + return self._environment_values[var_name] + except KeyError: + raise ValueError(f"No value is available for the environment variable {var_name!r}.") + + +class SamplingResolver: + def __call__( + self, + obj: Resolvable, + domain_sampler: DomainSampler, + environment_values: Mapping[str, Any], + ) -> tuple[Resolvable, SamplingResolutionContext]: + context = SamplingResolutionContext( + resolution_root=obj, + domain_sampler=domain_sampler, + environment_values=environment_values, + ) + return self._resolve(obj, "Resolvable", context), context + + def _resolve(self, obj: Any, name: str, context: SamplingResolutionContext) -> Any: + with context.resolving(obj, name): + return self._resolver_dispatch(obj, context) + + @functools.singledispatchmethod + def _resolver_dispatch( + self, + any_obj: Any, + _context: SamplingResolutionContext, + ) -> Any: + # Default resolver. To be used for types which are not instances of `Resolvable`. + # No need to store or lookup from context, directly return the given object. 
+ if isinstance(any_obj, Resolvable): + raise ValueError( + f"The default resolver is not supposed to be called for resolvable objects. Received: {any_obj!r}." + ) + return any_obj + + @_resolver_dispatch.register + def _( + self, + pipeline_obj: Pipeline, + context: SamplingResolutionContext, + ) -> Any: + if context.was_already_resolved(pipeline_obj): + return context.get_resolved(pipeline_obj) + + initial_attrs = pipeline_obj.get_attrs() + final_attrs = {} + needed_resolving = False + + for attr_name, initial_attr_value in initial_attrs.items(): + resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) + final_attrs[attr_name] = resolved_attr_value + needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) + + result = pipeline_obj + if needed_resolving: + result = pipeline_obj.from_attrs(final_attrs) + + context.add_resolved(pipeline_obj, result) + return result + + @_resolver_dispatch.register + def _( + self, + domain_obj: Domain, + context: SamplingResolutionContext, + ) -> Any: + if context.was_already_resolved(domain_obj): + return context.get_resolved(domain_obj) + + initial_attrs = domain_obj.get_attrs() + final_attrs = {} + needed_resolving = False + + for attr_name, initial_attr_value in initial_attrs.items(): + resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) + final_attrs[attr_name] = resolved_attr_value + needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) + + resolved_domain_obj = domain_obj + if needed_resolving: + resolved_domain_obj = domain_obj.from_attrs(final_attrs) + + try: + sampled_value = context.sample_from(resolved_domain_obj) + except Exception as e: + raise ValueError(f"Failed to sample from {resolved_domain_obj!r}.") from e + result = self._resolve(sampled_value, "sampled_value", context) + + context.add_resolved(domain_obj, result) + return result + + @_resolver_dispatch.register + def _( + self, + categorical_obj: 
Categorical, + context: SamplingResolutionContext, + ) -> Any: + if context.was_already_resolved(categorical_obj): + return context.get_resolved(categorical_obj) + + # In the case of categorical choices, we may skip resolving each choice initially, + # only after sampling we go into resolving whatever choice was chosen. + # This avoids resolving things which won't be needed at all. + # If the choices themselves come from some Resolvable, they will be resolved. + + initial_attrs = categorical_obj.get_attrs() + final_attrs = {} + needed_resolving = False + + for attr_name, initial_attr_value in initial_attrs.items(): + if attr_name == "choices": + if isinstance(initial_attr_value, Resolvable): + # Resolving here like below works fine since the expectation + # is that we will get back a tuple of choices. + # Any element in that tuple can be a Resolvable, + # but will not be resolved from the call directly below, + # as the tuple is returned as is, + # without going into resolving its elements. + # If we add a `_resolve_tuple` functionality to go into tuples + # and resolve their contents, the call below will likely + # lead to too much work being done or issues. 
+ resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) + else: + resolved_attr_value = initial_attr_value + else: + resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) + final_attrs[attr_name] = resolved_attr_value + needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) + + resolved_categorical_obj = categorical_obj + if needed_resolving: + resolved_categorical_obj = cast(Categorical, categorical_obj.from_attrs(final_attrs)) + + try: + sampled_index = context.sample_from(resolved_categorical_obj) + except Exception as e: + raise ValueError(f"Failed to sample from {resolved_categorical_obj!r}.") from e + sampled_value = cast(tuple, resolved_categorical_obj.choices)[sampled_index] + result = self._resolve(sampled_value, "sampled_value", context) + + context.add_resolved(categorical_obj, result) + return result + + @_resolver_dispatch.register + def _( + self, + operation_obj: Operation, + context: SamplingResolutionContext, + ) -> Any: + if context.was_already_resolved(operation_obj): + return context.get_resolved(operation_obj) + + initial_attrs = operation_obj.get_attrs() + final_attrs = {} + needed_resolving = False + + for attr_name, initial_attr_value in initial_attrs.items(): + resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) + final_attrs[attr_name] = resolved_attr_value + needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) + + result = operation_obj + if needed_resolving: + result = operation_obj.from_attrs(final_attrs) + + context.add_resolved(operation_obj, result) + return result + + @_resolver_dispatch.register + def _( + self, + resampled_obj: Resampled, + context: SamplingResolutionContext, + ) -> Any: + # The results of Resampled are never stored or looked up from cache + # since it would break the logic of their expected behavior. 
+ # Particularly, when Resampled objects are nested (at any depth) inside of + # other Resampled objects, adding them to the resolution context would result + # in the resolution not doing the right thing. + + if resampled_obj.is_resampling_by_name: + # We are dealing with a resampling by name, + # We will first need to look up the source object referenced by name. + # That will then be the object to resample. + referenced_obj_name = cast(str, resampled_obj.source) + referenced_obj = getattr(context.resolution_root, referenced_obj_name) + resampled_obj = Resampled(referenced_obj) + + initial_attrs = resampled_obj.get_attrs() + resolvable_to_resample_obj = resampled_obj.from_attrs(initial_attrs) + + type_name = type(resolvable_to_resample_obj).__name__.lower() + result = self._resolve(resolvable_to_resample_obj, f"resampled_{type_name}", context) + + return result + + @_resolver_dispatch.register + def _( + self, + fidelity_obj: Fidelity, + context: SamplingResolutionContext, + ) -> Any: + # A Fidelity object should only really be used in one place, + # so we check if we have seen it before. + # For that we will be storing its result in the resolved cache. + if context.was_already_resolved(fidelity_obj): + raise ValueError("Fidelity object reused multiple times in the pipeline.") + + # The way resolution works for Fidelity objects is that + # we use the domain inside it only to know the bounds for valid values. + # The actual value for the fidelity comes from the outside in the form of an + # environment value, which we look up by the attribute name of the + # received fidelity object inside the resolution root. 
+ + names_for_this_fidelity_obj = list( + attr_name + for attr_name, attr_value in context.resolution_root.get_attrs().items() + if attr_value is fidelity_obj + ) + + if len(names_for_this_fidelity_obj) == 0: + raise ValueError("A fidelity object should be a direct attribute of the pipeline.") + elif len(names_for_this_fidelity_obj) > 1: + raise ValueError("A fidelity object should only be referenced once in the pipeline.") + + fidelity_name = names_for_this_fidelity_obj[0] + + try: + result = context.get_value_from_environment(fidelity_name) + except ValueError: + raise ValueError(f"No value is available in the environment for fidelity {fidelity_name!r}.") + + if not fidelity_obj.min_value <= result <= fidelity_obj.max_value: + raise ValueError( + f"Value for fidelity with name {fidelity_name!r} is outside its allowed range " + + f"[{fidelity_obj.min_value!r}, {fidelity_obj.max_value!r}]. " + + f"Received: {result!r}." + ) + + context.add_resolved(fidelity_obj, result) + return result + + @_resolver_dispatch.register + def _( + self, + resolvable_obj: Resolvable, + context: SamplingResolutionContext, + ) -> Any: + # Called when no specialized resolver was available for the specific resolvable type. + # That is not something that is normally expected. + raise ValueError(f"No specialized resolver was registered for object of type {type(resolvable_obj)!r}.") + + +def resolve( + pipeline: P, + domain_sampler: DomainSampler | None = None, + environment_values: Mapping[str, Any] | None = None, +) -> tuple[P, SamplingResolutionContext]: + if domain_sampler is None: + # By default, use a random sampler with no predefined values. + domain_sampler = RandomSampler(predefined_samplings={}) + + if environment_values is None: + # By default, have no environment values. 
def convert_operation_to_callable(operation: "Operation") -> "Callable":
    """Recursively instantiate an ``Operation`` tree into a callable.

    Every nested ``Operation`` found among the positional or keyword
    arguments is converted first; the operator is then invoked with the
    converted arguments and the result is returned.
    """

    def _maybe_convert(value: Any) -> Any:
        # Only Operation nodes are recursed into; all other values pass through.
        if isinstance(value, Operation):
            return convert_operation_to_callable(value)
        return value

    converted_args = [_maybe_convert(arg) for arg in operation.args]
    converted_kwargs = {
        name: _maybe_convert(value) for name, value in operation.kwargs.items()
    }

    operator = cast(Callable, operation.operator)
    return cast(Callable, operator(*converted_args, **converted_kwargs))
self._environment_values = {} + fidelity_attrs = self._pipeline.fidelity_attrs + for fidelity_name, fidelity_obj in fidelity_attrs.items(): + self._environment_values[fidelity_name] = fidelity_obj.max_value + + self._random_sampler = RandomSampler(predefined_samplings={}) + + def __call__( + self, + trials: Mapping[str, trial_state.Trial], + budget_info: optimizer_state.BudgetInfo | None, + n: int | None = None, + ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: + n_prev_trials = len(trials) + n_requested = 1 if n is None else n + return_single = n is None + + chosen_pipelines = [ + resolve( + pipeline=self._pipeline, + domain_sampler=self._random_sampler, + environment_values=self._environment_values, + ) + for _ in range(n_requested) + ] + + return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) + + +class ComplexRandomSearch: + def __init__(self, pipeline: Pipeline): + self._pipeline = pipeline + + self._environment_values = {} + fidelity_attrs = self._pipeline.fidelity_attrs + for fidelity_name, fidelity_obj in fidelity_attrs.items(): + self._environment_values[fidelity_name] = fidelity_obj.max_value + + self._random_sampler = RandomSampler( + predefined_samplings={}, + ) + self._try_always_priors_sampler = PriorOrFallbackSampler( + fallback_sampler=self._random_sampler, + prior_use_probability=1, + ) + self._sometimes_priors_sampler = PriorOrFallbackSampler( + fallback_sampler=self._random_sampler, + prior_use_probability=0.1, + ) + + def __call__( + self, + trials: Mapping[str, trial_state.Trial], + budget_info: optimizer_state.BudgetInfo | None, + n: int | None = None, + ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: + n_prev_trials = len(trials) + n_requested = 1 if n is None else n + return_single = n is None + + random_pipelines = [ + resolve( + pipeline=self._pipeline, + domain_sampler=self._random_sampler, + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + 
sometimes_priors_pipelines = [ + resolve( + pipeline=self._pipeline, + domain_sampler=self._sometimes_priors_sampler, + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + + mutated_incumbents = [] + crossed_over_incumbents = [] + + successful_trials = list( + filter( + lambda trial: trial.report.reported_as == trial.State.SUCCESS, + trials.values(), + ) + ) + if len(successful_trials) > 0: + n_top_trials = 5 + top_trials = heapq.nsmallest( + n_top_trials, + successful_trials, + key=lambda trial: trial.report.objective_to_minimize, + ) # Will have up to `n_top_trials` items. + + # Do some mutations. + for top_trial in top_trials: + top_trial_config = top_trial.config + mutated_incumbents += [ + resolve( + pipeline=self._pipeline, + domain_sampler=MutateByForgettingSampler( + predefined_samplings=top_trial_config, + n_forgets=1, + ), + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + mutated_incumbents += [ + resolve( + pipeline=self._pipeline, + domain_sampler=MutateByForgettingSampler( + predefined_samplings=top_trial_config, + n_forgets=random.randint(1, len(top_trial_config)), + ), + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + + # Do some crossovers. + if len(top_trials) > 1: + for _ in range(n_requested * 3): + trial_1, trial_2 = random.sample(top_trials, k=2) + + try: + crossover_sampler = CrossoverByMixingSampler( + predefined_samplings_1=trial_1.config, + predefined_samplings_2=trial_2.config, + prefer_first_probability=0.5, + ) + except CrossoverNotPossibleError: + # A crossover was not possible for them. Do nothing. 
+ pass + else: + crossed_over_incumbents.append( + resolve( + pipeline=self._pipeline, + domain_sampler=crossover_sampler, + environment_values=self._environment_values, + ), + ) + + try: + crossover_sampler = CrossoverByMixingSampler( + predefined_samplings_1=trial_2.config, + predefined_samplings_2=trial_1.config, + prefer_first_probability=0.5, + ) + except CrossoverNotPossibleError: + # A crossover was not possible for them. Do nothing. + pass + else: + crossed_over_incumbents.append( + resolve( + pipeline=self._pipeline, + domain_sampler=crossover_sampler, + environment_values=self._environment_values, + ), + ) + + all_sampled_pipelines = [ + *random_pipelines, + *sometimes_priors_pipelines, + *mutated_incumbents, + *crossed_over_incumbents, + ] + + # Here we can have a model which picks from all the sampled pipelines. + # Currently, we just pick randomly from them. + chosen_pipelines = random.sample(all_sampled_pipelines, k=n_requested) + + if n_prev_trials == 0: + # In this case, always include the prior pipeline. 
+ prior_pipeline = resolve( + pipeline=self._pipeline, + domain_sampler=self._try_always_priors_sampler, + environment_values=self._environment_values, + ) + chosen_pipelines[0] = prior_pipeline + + return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) + + +# ------------------------------------------------- + + +class _NepsCompatConverter: + _SAMPLING_PREFIX = "SAMPLING__" + _ENVIRONMENT_PREFIX = "ENVIRONMENT__" + _SAMPLING_PREFIX_LEN = len(_SAMPLING_PREFIX) + _ENVIRONMENT_PREFIX_LEN = len(_ENVIRONMENT_PREFIX) + + @dataclasses.dataclass(frozen=True) + class _FromNepsConfigResult: + predefined_samplings: Mapping[str, Any] + environment_values: Mapping[str, Any] + extra_kwargs: Mapping[str, Any] + + @classmethod + def to_neps_config( + cls, + resolution_context: SamplingResolutionContext, + ) -> Mapping[str, Any]: + config: dict[str, Any] = {} + + samplings_made = resolution_context.samplings_made + for sampling_path, value in samplings_made.items(): + config[f"{cls._SAMPLING_PREFIX}{sampling_path}"] = value + + environment_values = resolution_context.environment_values + for env_name, value in environment_values.items(): + config[f"{cls._ENVIRONMENT_PREFIX}{env_name}"] = value + + return config + + @classmethod + def from_neps_config( + cls, + config: Mapping[str, Any], + ) -> _FromNepsConfigResult: + predefined_samplings = {} + environment_values = {} + extra_kwargs = {} + + for name, value in config.items(): + if name.startswith(cls._SAMPLING_PREFIX): + sampling_path = name[cls._SAMPLING_PREFIX_LEN :] + predefined_samplings[sampling_path] = value + elif name.startswith(cls._ENVIRONMENT_PREFIX): + env_name = name[cls._ENVIRONMENT_PREFIX_LEN :] + environment_values[env_name] = value + else: + extra_kwargs[name] = value + + return cls._FromNepsConfigResult( + predefined_samplings=predefined_samplings, + environment_values=environment_values, + extra_kwargs=extra_kwargs, + ) + + +def _prepare_sampled_configs( + chosen_pipelines: 
list[tuple[Pipeline, SamplingResolutionContext]], + n_prev_trials: int, + return_single: bool, +) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: + configs = [] + for i, (_resolved_pipeline, resolution_context) in enumerate(chosen_pipelines): + neps_config = _NepsCompatConverter.to_neps_config( + resolution_context=resolution_context, + ) + + config = optimizer.SampledConfig( + config=neps_config, + id=str(n_prev_trials + i + 1), + previous_config_id=None, + ) + configs.append(config) + + if return_single: + return configs[0] + + return configs + + +def adjust_evaluation_pipeline_for_new_space( + evaluation_pipeline: Callable, + pipeline_space: P, + operation_converter: Callable[[Operation], Any] = convert_operation_to_callable, +) -> Callable | str: + @functools.wraps(evaluation_pipeline) + def inner(*args: Any, **kwargs: Any) -> Any: + # `kwargs` can contain other things not related to + # the samplings to make or to environment values. + # That is not an issue. Those items will be passed through. + + sampled_pipeline_data = _NepsCompatConverter.from_neps_config(config=kwargs) + + sampled_pipeline, _resolution_context = resolve( + pipeline=pipeline_space, + domain_sampler=OnlyPredefinedValuesSampler( + predefined_samplings=sampled_pipeline_data.predefined_samplings, + ), + environment_values=sampled_pipeline_data.environment_values, + ) + + config = dict(**sampled_pipeline.get_attrs()) + for name, value in config.items(): + if isinstance(value, Operation): + config[name] = operation_converter(value) + + # So that we still pass the kwargs not related to the config. + # Take away all the kwargs which were related to samplings made. + new_kwargs = dict(**sampled_pipeline_data.extra_kwargs) + # Then add all the kwargs from the config. 
def hyperparameter_pipeline_to_optimize(
    float1: float,
    float2: float,
    categorical: int,
    integer1: int,
    integer2: int,
):
    """Toy evaluation target: the negated sum of all hyperparameters.

    The type checks guard that the space resolution delivered native
    Python values for every parameter before the score is computed.
    """
    for value, expected_type in (
        (float1, float),
        (float2, float),
        (categorical, int),
        (integer1, int),
        (integer2, int),
    ):
        assert isinstance(value, expected_type)

    score = -float(float1 + float2 + categorical + integer1 + integer2)
    assert isinstance(score, float)

    return score
max_value=10, + prior=0.1, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + categorical = space.Categorical( + choices=(0, 1), + prior_index=0, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + integer1 = space.Integer( + min_value=0, + max_value=1, + prior=0, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + integer2 = space.Fidelity( + space.Integer( + min_value=1, + max_value=1000, + ), + ) + + +class DemoHyperparameterComplexSpace(space.Pipeline): + _small_float = space.Float( + min_value=0, + max_value=1, + prior=0.1, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + _big_float = space.Float( + min_value=10, + max_value=100, + prior=20, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + + float1 = space.Categorical( + choices=( + space.Resampled(_small_float), + space.Resampled(_big_float), + ), + prior_index=0, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + float2 = space.Categorical( + choices=( + space.Resampled(_small_float), + space.Resampled(_big_float), + float1, + ), + prior_index=0, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + categorical = space.Categorical( + choices=(0, 1), + prior_index=0, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + integer1 = space.Integer( + min_value=0, + max_value=1, + prior=0, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + integer2 = space.Integer( + min_value=1, + max_value=1000, + prior=10, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + + +@pytest.mark.parametrize( + "optimizer", + [space.RandomSearch, space.ComplexRandomSearch], +) +def test_hyperparameter_demo(optimizer): + pipeline_space = DemoHyperparameterSpace() + root_directory = f"results/hyperparameter_demo__{optimizer.__name__}" + + neps.run( + evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( + hyperparameter_pipeline_to_optimize, + pipeline_space, + ), + pipeline_space=pipeline_space, + optimizer=optimizer, + root_directory=root_directory, + post_run_summary=True, + 
class Model:
    """Scales the result of a wrapped aggregation function by a fixed factor."""

    def __init__(
        self,
        inner_function: Callable[[Sequence[float]], float],
        factor: float,
    ):
        # The aggregation to delegate to, and the multiplier applied on top.
        self.inner_function = inner_function
        self.factor = factor

    def __call__(self, values: Sequence[float]) -> float:
        """Apply the inner function to ``values`` and scale the result."""
        inner_result = self.inner_function(values)
        return self.factor * inner_result
class MultipliedSum:
    """Callable that sums its input and multiplies the total by a factor."""

    def __init__(self, factor: float):
        # Multiplier applied to the computed sum.
        self.factor = factor

    def __call__(self, values: Sequence[float]) -> float:
        """Return ``factor * sum(values)``."""
        total = sum(values)
        return self.factor * total
space.ComplexRandomSearch], +) +def test_operation_demo(optimizer): + pipeline_space = DemoOperationSpace() + root_directory = f"results/operation_demo__{optimizer.__name__}" + + neps.run( + evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( + operation_pipeline_to_optimize, + pipeline_space, + ), + pipeline_space=pipeline_space, + optimizer=optimizer, + root_directory=root_directory, + post_run_summary=True, + max_evaluations_total=10, + overwrite_working_directory=True, + ) + print() + print(f"\nRoot directory: {root_directory}") + neps.status(root_directory, print_summary=True) diff --git a/neps/space/new_space/tests/test_search_space__fidelity.py b/neps/space/new_space/tests/test_search_space__fidelity.py new file mode 100644 index 000000000..fb3a265a1 --- /dev/null +++ b/neps/space/new_space/tests/test_search_space__fidelity.py @@ -0,0 +1,110 @@ +import re + +import pytest + +import neps.space.new_space.space as space + + +class DemoHyperparametersWithFidelitySpace(space.Pipeline): + constant1: int = 42 + float1: float = space.Float( + min_value=0, + max_value=1, + prior=0.1, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + fidelity_integer1: int = space.Fidelity( + domain=space.Integer( + min_value=1, + max_value=1000, + ), + ) + + +def test_fidelity_creation_raises_when_domain_has_prior(): + # Creating a fidelity object with a domain that has a prior should not be possible. + with pytest.raises( + ValueError, + match=re.escape("The domain of a Fidelity can not have priors: "), + ): + space.Fidelity( + domain=space.Integer( + min_value=1, + max_value=1000, + prior=10, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ), + ) + + +def test_fidelity_resolution_raises_when_resolved_with_no_environment_value(): + pipeline = DemoHyperparametersWithFidelitySpace() + + # Resolve a pipeline which contains a fidelity with an empty environment. 
+ with pytest.raises( + ValueError, + match=re.escape( + "No value is available in the environment for fidelity 'fidelity_integer1'.", + ), + ): + space.resolve(pipeline=pipeline) + + +def test_fidelity_resolution_raises_when_resolved_with_invalid_value(): + pipeline = DemoHyperparametersWithFidelitySpace() + + # Resolve a pipeline which contains a fidelity, + # with an environment value for it, that is out of the allowed range. + with pytest.raises( + ValueError, + match=re.escape( + "Value for fidelity with name 'fidelity_integer1' is outside its allowed range [1, 1000]. Received: -10." + ), + ): + space.resolve( + pipeline=pipeline, + environment_values={"fidelity_integer1": -10}, + ) + + +def test_fidelity_resolution_works(): + pipeline = DemoHyperparametersWithFidelitySpace() + + # Resolve a pipeline which contains a fidelity, + # with a valid value for it in the environment. + resolved_pipeline, resolution_context = space.resolve( + pipeline=pipeline, + environment_values={"fidelity_integer1": 10}, + ) + + assert resolved_pipeline.constant1 == 42 + assert 0.0 <= resolved_pipeline.float1 <= 1.0 + assert resolved_pipeline.fidelity_integer1 == 10 + + +def test_fidelity_resolution_with_context_works(): + pipeline = DemoHyperparametersWithFidelitySpace() + + samplings_to_make = { + "Resolvable.float1::float__0_1_False": 0.5, + } + environment_values = { + "fidelity_integer1": 10, + } + + # Resolve a pipeline which contains a fidelity, + # with a valid value for it in the environment. 
+ resolved_pipeline, resolution_context = space.resolve( + pipeline=pipeline, + domain_sampler=space.OnlyPredefinedValuesSampler( + predefined_samplings=samplings_to_make, + ), + environment_values=environment_values, + ) + + assert resolved_pipeline.constant1 == 42 + assert resolved_pipeline.float1 == 0.5 + assert resolved_pipeline.fidelity_integer1 == 10 + + assert resolution_context.samplings_made == samplings_to_make + assert resolution_context.environment_values == environment_values diff --git a/neps/space/new_space/tests/test_search_space__hnas_like.py b/neps/space/new_space/tests/test_search_space__hnas_like.py new file mode 100644 index 000000000..a3728de55 --- /dev/null +++ b/neps/space/new_space/tests/test_search_space__hnas_like.py @@ -0,0 +1,299 @@ +import pytest + +from neps.space.new_space import space +from neps.space.new_space import config_string + + +class HNASLikePipeline(space.Pipeline): + """ + Based on the `hierarchical+shared` variant (cell block is shared everywhere). + Across _CONVBLOCK items, _ACT and _CONV also shared. Only the _NORM changes. + + Additionally, this variant now has a PReLU operation with a float hyperparameter (init). + The same value of that hyperparameter would is used everywhere a _PRELU is used. 
+ """ + + # ------------------------------------------------------ + # Adding `PReLU` with a float hyperparameter `init` + # Note that the sampled `_prelu_init_value` will be shared across all `_PRELU` uses, + # since no `Resampled` was requested for it + _prelu_init_value = space.Float(min_value=0.1, max_value=0.9) + _PRELU = space.Operation( + operator="ACT prelu", + kwargs={"init": _prelu_init_value}, + ) + # ------------------------------------------------------ + + # Added `_PRELU` to the possible `_ACT` choices + _ACT = space.Categorical( + choices=( + space.Operation(operator="ACT relu"), + space.Operation(operator="ACT hardswish"), + space.Operation(operator="ACT mish"), + _PRELU, + ), + ) + _CONV = space.Categorical( + choices=( + space.Operation(operator="CONV conv1x1"), + space.Operation(operator="CONV conv3x3"), + space.Operation(operator="CONV dconv3x3"), + ), + ) + _NORM = space.Categorical( + choices=( + space.Operation(operator="NORM batch"), + space.Operation(operator="NORM instance"), + space.Operation(operator="NORM layer"), + ), + ) + + _CONVBLOCK = space.Operation( + operator="CONVBLOCK Sequential3", + args=( + _ACT, + _CONV, + space.Resampled(_NORM), + ), + ) + _CONVBLOCK_FULL = space.Operation( + operator="OPS Sequential1", + args=(space.Resampled(_CONVBLOCK),), + ) + _OP = space.Categorical( + choices=( + space.Operation(operator="OPS zero"), + space.Operation(operator="OPS id"), + space.Operation(operator="OPS avg_pool"), + space.Resampled(_CONVBLOCK_FULL), + ), + ) + + CL = space.Operation( + operator="CELL Cell", + args=( + space.Resampled(_OP), + space.Resampled(_OP), + space.Resampled(_OP), + space.Resampled(_OP), + space.Resampled(_OP), + space.Resampled(_OP), + ), + ) + + _C = space.Categorical( + choices=( + space.Operation(operator="C Sequential2", args=(CL, CL)), + space.Operation(operator="C Sequential3", args=(CL, CL, CL)), + space.Operation(operator="C Residual2", args=(CL, CL, CL)), + ), + ) + + _RESBLOCK = 
space.Operation(operator="resBlock") + _DOWN = space.Categorical( + choices=( + space.Operation(operator="DOWN Sequential2", args=(CL, _RESBLOCK)), + space.Operation(operator="DOWN Sequential3", args=(CL, CL, _RESBLOCK)), + space.Operation(operator="DOWN Residual2", args=(CL, _RESBLOCK, _RESBLOCK)), + ), + ) + + _D0 = space.Categorical( + choices=( + space.Operation( + operator="D0 Sequential3", + args=( + space.Resampled(_C), + space.Resampled(_C), + CL, + ), + ), + space.Operation( + operator="D0 Sequential4", + args=( + space.Resampled(_C), + space.Resampled(_C), + space.Resampled(_C), + CL, + ), + ), + space.Operation( + operator="D0 Residual3", + args=( + space.Resampled(_C), + space.Resampled(_C), + CL, + CL, + ), + ), + ), + ) + _D1 = space.Categorical( + choices=( + space.Operation( + operator="D1 Sequential3", + args=( + space.Resampled(_C), + space.Resampled(_C), + space.Resampled(_DOWN), + ), + ), + space.Operation( + operator="D1 Sequential4", + args=( + space.Resampled(_C), + space.Resampled(_C), + space.Resampled(_C), + space.Resampled(_DOWN), + ), + ), + space.Operation( + operator="D1 Residual3", + args=( + space.Resampled(_C), + space.Resampled(_C), + space.Resampled(_DOWN), + space.Resampled(_DOWN), + ), + ), + ), + ) + + _D2 = space.Categorical( + choices=( + space.Operation( + operator="D2 Sequential3", + args=( + space.Resampled(_D1), + space.Resampled(_D1), + space.Resampled(_D0), + ), + ), + space.Operation( + operator="D2 Sequential3", + args=( + space.Resampled(_D0), + space.Resampled(_D1), + space.Resampled(_D1), + ), + ), + space.Operation( + operator="D2 Sequential4", + args=( + space.Resampled(_D1), + space.Resampled(_D1), + space.Resampled(_D0), + space.Resampled(_D0), + ), + ), + ), + ) + + ARCH: space.Operation = _D2 + + +@pytest.mark.repeat(500) +def test_hnas_like(): + pipeline = HNASLikePipeline() + + resolved_pipeline, resolution_context = space.resolve(pipeline) + assert resolved_pipeline is not None + assert 
resolution_context.samplings_made is not None + assert tuple(resolved_pipeline.get_attrs().keys()) == ("CL", "ARCH") + + +@pytest.mark.repeat(500) +def test_hnas_like_string(): + pipeline = HNASLikePipeline() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + arch = resolved_pipeline.ARCH + arch_config_string = space.convert_operation_to_string(arch) + assert arch_config_string + pretty_config = config_string.ConfigString(arch_config_string).pretty_format() + assert pretty_config + + cl = resolved_pipeline.CL + cl_config_string = space.convert_operation_to_string(cl) + assert cl_config_string + pretty_config = config_string.ConfigString(cl_config_string).pretty_format() + assert pretty_config + + +def test_hnas_like_context(): + samplings_to_make = { + "Resolvable.CL.args[0].resampled_categorical::categorical__4": 3, + "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[0]::categorical__4": 0, + "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[1]::categorical__3": 2, + "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": 0, + "Resolvable.CL.args[1].resampled_categorical::categorical__4": 0, + "Resolvable.CL.args[2].resampled_categorical::categorical__4": 1, + "Resolvable.CL.args[3].resampled_categorical::categorical__4": 2, + "Resolvable.CL.args[4].resampled_categorical::categorical__4": 3, + "Resolvable.CL.args[4].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": 2, + "Resolvable.CL.args[5].resampled_categorical::categorical__4": 0, + "Resolvable.ARCH::categorical__3": 1, + "Resolvable.ARCH.sampled_value.args[0].resampled_categorical::categorical__3": 2, + 
"Resolvable.ARCH.sampled_value.args[0].resampled_categorical.sampled_value.args[0].resampled_categorical::categorical__3": 2, + "Resolvable.ARCH.sampled_value.args[0].resampled_categorical.sampled_value.args[1].resampled_categorical::categorical__3": 0, + "Resolvable.ARCH.sampled_value.args[1].resampled_categorical::categorical__3": 2, + "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[0].resampled_categorical::categorical__3": 0, + "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[1].resampled_categorical::categorical__3": 0, + "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[2].resampled_categorical::categorical__3": 0, + "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[3].resampled_categorical::categorical__3": 1, + "Resolvable.ARCH.sampled_value.args[2].resampled_categorical::categorical__3": 2, + } + + expected_cl_config_string = "(CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))" + expected_arch_config_string = "(D2 Sequential3 (D0 Residual3 (C Residual2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (C Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK 
Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (D1 Residual3 (C Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (C Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (DOWN Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) 
(NORM layer))) (OPS zero)) resBlock) (DOWN Sequential3 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock)) (D1 Residual3 (C Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (C Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (DOWN Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock) (DOWN Sequential3 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) 
(CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock)))" + + pipeline = HNASLikePipeline() + + resolved_pipeline, resolution_context = space.resolve( + pipeline=pipeline, + domain_sampler=space.OnlyPredefinedValuesSampler( + predefined_samplings=samplings_to_make, + ), + ) + sampled_values = resolution_context.samplings_made + + assert resolved_pipeline is not None + assert sampled_values is not None + assert sampled_values is not samplings_to_make + assert sampled_values == samplings_to_make + assert list(sampled_values.items()) == list(samplings_to_make.items()) + + # we should have made exactly those samplings + assert sampled_values == samplings_to_make + + cl = resolved_pipeline.CL + cl_config_string = space.convert_operation_to_string(cl) + assert cl_config_string + assert cl_config_string == expected_cl_config_string + assert "NORM batch" in cl_config_string + assert "NORM layer" in cl_config_string + + arch = resolved_pipeline.ARCH + arch_config_string = space.convert_operation_to_string(arch) + assert arch_config_string + assert arch_config_string == expected_arch_config_string + assert cl_config_string in arch_config_string + + # print() + # print("Sampled CELL: " + cl_config_string) + # print("Sampled ARCH: " + arch_config_string) + # print("Sampled values:") + # import pprint + # + # # pprint.pp(sampled_values, indent=2, compact=True) + # + # print() + # + # print("ARCH received:") + # pretty_config = config_string.ConfigString(arch_config_string).pretty_format() + # print(pretty_config) + # + # print("Arch expected:") + # pretty_config = config_string.ConfigString(expected_arch_config_string).pretty_format() + # print(pretty_config) diff --git a/neps/space/new_space/tests/test_search_space__recursion.py b/neps/space/new_space/tests/test_search_space__recursion.py new file mode 100644 index 000000000..6ea47a690 --- /dev/null +++ 
b/neps/space/new_space/tests/test_search_space__recursion.py @@ -0,0 +1,90 @@ +from typing import Callable, Sequence + +from neps.space.new_space import space + + +class Model: + def __init__( + self, + inner_function: Callable[[Sequence[float]], float], + factor: float, + ): + self.inner_function = inner_function + self.factor = factor + + def __call__(self, values: Sequence[float]) -> float: + return self.factor * self.inner_function(values) + + +class Sum: + def __call__(self, values: Sequence[float]) -> float: + return sum(values) + + +class DemoRecursiveOperationSpace(space.Pipeline): + # The way to sample `factor` values + _factor = space.Float(min_value=0, max_value=1) + + # Sum + _sum = space.Operation(operator=Sum) + + # Model + # Can recursively request itself as an arg. + # Will be equivalent to something like one of + # `Model(Sum(), factor=0.1)` + # `Model(Model(Sum(), factor=0.1), factor=0.1)` + # `Model(Model(Model(Sum(), factor=0.1), factor=0.1), factor=0.1)` + # ... + # If we want the `factor` values to be different, + # we just request a resample for them + _inner_function = space.Categorical( + choices=(_sum, space.Resampled("model")), + ) + model = space.Operation( + operator=Model, + args=(space.Resampled(_inner_function),), + kwargs={"factor": _factor}, + ) + + +def test_recursion(): + pipeline = DemoRecursiveOperationSpace() + + # Across `n` iterations we collect the number of seen inner `Model` counts. 
+ # We expect to see at least `k` cases for that number + expected_minimal_number_of_recursions = 3 + seen_inner_model_counts = [] + + for _ in range(200): + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + model = resolved_pipeline.model + assert model.operator is Model + + inner_function = model.args[0] + seen_factors = [model.kwargs["factor"]] + seen_inner_model_count = 0 + + # Loop into the inner operators until we have no more nested `Model` args + while inner_function.operator is Model: + seen_factors.append(inner_function.kwargs["factor"]) + seen_inner_model_count += 1 + inner_function = inner_function.args[0] + + # At this point we should have gone deep enough to have the terminal `Sum` + assert inner_function.operator is Sum + + # We should have seen as many factors as inner models + 1 for the outer one + assert len(seen_factors) == seen_inner_model_count + 1 + + # All the factors should be the same value (shared) + assert len(set(seen_factors)) == 1 + assert isinstance(seen_factors[0], float) + + # Add the number of seen `Model` operator in the loop + seen_inner_model_counts.append(seen_inner_model_count) + + assert len(set(seen_inner_model_counts)) >= expected_minimal_number_of_recursions + + +# TODO: test context with recursion (`samplings_to_make`) diff --git a/neps/space/new_space/tests/test_search_space__resampled.py b/neps/space/new_space/tests/test_search_space__resampled.py new file mode 100644 index 000000000..da16fd045 --- /dev/null +++ b/neps/space/new_space/tests/test_search_space__resampled.py @@ -0,0 +1,274 @@ +import pytest + +from neps.space.new_space import space + + +class ActPipelineSimpleFloat(space.Pipeline): + prelu_init_value = space.Float( + min_value=0, + max_value=1000000, + log=False, + prior=0.25, + prior_confidence=space.ConfidenceLevel.LOW, + ) + + prelu_shared1 = space.Operation( + operator="prelu", + kwargs={"init": prelu_init_value}, + ) + prelu_shared2 = space.Operation( + operator="prelu", + 
kwargs={"init": prelu_init_value}, + ) + + prelu_own_clone1 = space.Operation( + operator="prelu", + kwargs={"init": space.Resampled(prelu_init_value)}, + ) + prelu_own_clone2 = space.Operation( + operator="prelu", + kwargs={"init": space.Resampled(prelu_init_value)}, + ) + + _prelu_init_resampled = space.Resampled(prelu_init_value) + prelu_common_clone1 = space.Operation( + operator="prelu", + kwargs={"init": _prelu_init_resampled}, + ) + prelu_common_clone2 = space.Operation( + operator="prelu", + kwargs={"init": _prelu_init_resampled}, + ) + + +class ActPipelineComplexInteger(space.Pipeline): + prelu_init_value = space.Integer(min_value=0, max_value=1000000) + + prelu_shared1 = space.Operation( + operator="prelu", + kwargs={"init": prelu_init_value}, + ) + prelu_shared2 = space.Operation( + operator="prelu", + kwargs={"init": prelu_init_value}, + ) + + prelu_own_clone1 = space.Operation( + operator="prelu", + kwargs={"init": space.Resampled(prelu_init_value)}, + ) + prelu_own_clone2 = space.Operation( + operator="prelu", + kwargs={"init": space.Resampled(prelu_init_value)}, + ) + + _prelu_init_resampled = space.Resampled(prelu_init_value) + prelu_common_clone1 = space.Operation( + operator="prelu", + kwargs={"init": _prelu_init_resampled}, + ) + prelu_common_clone2 = space.Operation( + operator="prelu", + kwargs={"init": _prelu_init_resampled}, + ) + + act: space.Operation = space.Operation( + operator="sequential6", + args=( + prelu_shared1, + prelu_shared2, + prelu_own_clone1, + prelu_own_clone2, + prelu_common_clone1, + prelu_common_clone2, + ), + kwargs={ + "prelu_shared": prelu_shared1, + "prelu_own_clone": prelu_own_clone1, + "prelu_common_clone": prelu_common_clone1, + "resampled_hp_value": space.Resampled(prelu_init_value), + }, + ) + + +class CellPipelineCategorical(space.Pipeline): + conv_block = space.Categorical( + choices=( + space.Operation(operator="conv1"), + space.Operation(operator="conv2"), + ), + ) + + op1 = space.Categorical( + choices=( + 
conv_block, + space.Operation("op1"), + ), + ) + op2 = space.Categorical( + choices=( + space.Resampled(conv_block), + space.Operation("op2"), + ), + ) + + _resampled_op1 = space.Resampled(op1) + cell = space.Operation( + operator="cell", + args=( + op1, + op2, + _resampled_op1, + space.Resampled(op2), + _resampled_op1, + space.Resampled(op2), + ), + ) + + +@pytest.mark.repeat(200) +def test_resampled_float(): + pipeline = ActPipelineSimpleFloat() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + assert resolved_pipeline is not None + assert tuple(resolved_pipeline.get_attrs().keys()) == ( + "prelu_init_value", + "prelu_shared1", + "prelu_shared2", + "prelu_own_clone1", + "prelu_own_clone2", + "prelu_common_clone1", + "prelu_common_clone2", + ) + + prelu_init_value = resolved_pipeline.prelu_init_value + prelu_shared1 = resolved_pipeline.prelu_shared1.kwargs["init"] + prelu_shared2 = resolved_pipeline.prelu_shared2.kwargs["init"] + resampled_values = ( + resolved_pipeline.prelu_own_clone1.kwargs["init"], + resolved_pipeline.prelu_own_clone2.kwargs["init"], + resolved_pipeline.prelu_common_clone1.kwargs["init"], + resolved_pipeline.prelu_common_clone2.kwargs["init"], + ) + + assert isinstance(prelu_init_value, float) + assert isinstance(prelu_shared1, float) + assert isinstance(prelu_shared2, float) + assert all(isinstance(resampled_value, float) for resampled_value in resampled_values) + + assert prelu_init_value == prelu_shared1 + assert prelu_init_value == prelu_shared2 + + assert len(set(resampled_values)) == len(resampled_values) + assert all(resampled_value != prelu_init_value for resampled_value in resampled_values) + + +@pytest.mark.repeat(200) +def test_resampled_integer(): + pipeline = ActPipelineComplexInteger() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + assert resolved_pipeline is not None + assert tuple(resolved_pipeline.get_attrs().keys()) == ( + "prelu_init_value", + "prelu_shared1", + 
"prelu_shared2", + "prelu_own_clone1", + "prelu_own_clone2", + "prelu_common_clone1", + "prelu_common_clone2", + "act", + ) + + prelu_init_value = resolved_pipeline.prelu_init_value + prelu_shared1 = resolved_pipeline.prelu_shared1.kwargs["init"] + prelu_shared2 = resolved_pipeline.prelu_shared2.kwargs["init"] + resampled_values = ( + resolved_pipeline.prelu_own_clone1.kwargs["init"], + resolved_pipeline.prelu_own_clone2.kwargs["init"], + resolved_pipeline.prelu_common_clone1.kwargs["init"], + resolved_pipeline.prelu_common_clone2.kwargs["init"], + ) + + assert isinstance(prelu_init_value, int) + assert isinstance(prelu_shared1, int) + assert isinstance(prelu_shared2, int) + assert all(isinstance(resampled_value, int) for resampled_value in resampled_values) + + assert prelu_init_value == prelu_shared1 + assert prelu_init_value == prelu_shared2 + + assert len(set(resampled_values)) == len(resampled_values) + assert all(resampled_value != prelu_init_value for resampled_value in resampled_values) + + act = resolved_pipeline.act + + act_args = tuple(op.kwargs["init"] for op in act.args) + sampled_values = (prelu_shared1, prelu_shared2, *resampled_values) + assert len(act_args) == len(sampled_values) + for act_arg, sampled_value in zip(act_args, sampled_values): + assert act_arg is sampled_value + + act_resampled_prelu_shared = act.kwargs["prelu_shared"].kwargs["init"] + act_resampled_prelu_own_clone = act.kwargs["prelu_own_clone"].kwargs["init"] + act_resampled_prelu_common_clone = act.kwargs["prelu_common_clone"].kwargs["init"] + + assert isinstance(act_resampled_prelu_shared, int) + assert isinstance(act_resampled_prelu_own_clone, int) + assert isinstance(act_resampled_prelu_common_clone, int) + + assert act_resampled_prelu_shared == prelu_init_value + assert act_resampled_prelu_own_clone != prelu_init_value + assert act_resampled_prelu_common_clone != prelu_init_value + assert act_resampled_prelu_own_clone != act_resampled_prelu_common_clone + + 
act_resampled_hp_value = act.kwargs["resampled_hp_value"] + assert isinstance(act_resampled_hp_value, int) + assert act_resampled_hp_value != prelu_init_value + assert all(resampled_value != act_resampled_hp_value for resampled_value in resampled_values) + + +@pytest.mark.repeat(200) +def test_resampled_categorical(): + pipeline = CellPipelineCategorical() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + assert resolved_pipeline is not None + assert tuple(resolved_pipeline.get_attrs().keys()) == ( + "conv_block", + "op1", + "op2", + "cell", + ) + + conv_block = resolved_pipeline.conv_block + assert conv_block is not pipeline.conv_block + + op1 = resolved_pipeline.op1 + op2 = resolved_pipeline.op2 + assert op1 is not pipeline.op1 + assert op2 is not pipeline.op2 + + assert isinstance(op1, space.Operation) + assert isinstance(op2, space.Operation) + + assert (op1 is conv_block) or (op1.operator == "op1") + assert op2.operator == "conv1" or op2.operator == "conv2" or op2.operator == "op2" + + cell = resolved_pipeline.cell + assert cell is not pipeline.cell + + cell_args1 = cell.args[0] + cell_args2 = cell.args[1] + cell_args3 = cell.args[2] + cell_args4 = cell.args[3] + cell_args5 = cell.args[4] + cell_args6 = cell.args[5] + + assert cell_args1 is op1 + assert cell_args2 is op2 + # todo: think about what more tests we can have for cell_args[3-6] diff --git a/neps/space/new_space/tests/test_search_space__reuse_arch_elements.py b/neps/space/new_space/tests/test_search_space__reuse_arch_elements.py new file mode 100644 index 000000000..80208dc8d --- /dev/null +++ b/neps/space/new_space/tests/test_search_space__reuse_arch_elements.py @@ -0,0 +1,387 @@ +import pytest + +from neps.space.new_space import space + + +class ActPipelineSimple(space.Pipeline): + prelu = space.Operation( + operator="prelu", + kwargs={"init": 0.1}, + ) + relu = space.Operation(operator="relu") + + act: space.Operation = space.Categorical( + choices=(prelu, relu), + ) + + 
+class ActPipelineComplex(space.Pipeline): + prelu_init_value: float = space.Float(min_value=0.1, max_value=0.9) + prelu = space.Operation( + operator="prelu", + kwargs={"init": prelu_init_value}, + ) + act: space.Operation = space.Categorical( + choices=(prelu,), + ) + + +class FixedPipeline(space.Pipeline): + prelu_init_value: float = 0.5 + prelu = space.Operation( + operator="prelu", + kwargs={"init": prelu_init_value}, + ) + act = prelu + + +_conv_choices_low = ("conv1x1", "conv3x3") +_conv_choices_high = ("conv5x5", "conv9x9") +_conv_choices_prior_confidence_choices = ( + space.ConfidenceLevel.LOW, + space.ConfidenceLevel.MEDIUM, + space.ConfidenceLevel.HIGH, +) + + +class ConvPipeline(space.Pipeline): + conv_choices_prior_index: int = space.Integer( + min_value=0, + max_value=1, + log=False, + prior=0, + prior_confidence=space.ConfidenceLevel.LOW, + ) + conv_choices_prior_confidence: space.ConfidenceLevel = space.Categorical( + choices=_conv_choices_prior_confidence_choices, + prior_index=1, + prior_confidence=space.ConfidenceLevel.LOW, + ) + conv_choices: tuple[str, ...] 
= space.Categorical( + choices=(_conv_choices_low, _conv_choices_high), + prior_index=conv_choices_prior_index, + prior_confidence=conv_choices_prior_confidence, + ) + + _conv1: str = space.Categorical( + choices=conv_choices, + ) + _conv2: str = space.Categorical( + choices=conv_choices, + ) + + conv_block: space.Operation = space.Categorical( + choices=( + space.Operation( + operator="sequential3", + args=[_conv1, _conv2, _conv1], + ), + ), + ) + + +class CellPipeline(space.Pipeline): + _act = space.Operation(operator="relu") + _conv = space.Operation(operator="conv3x3") + _norm = space.Operation(operator="batch") + + conv_block = space.Operation(operator="sequential3", args=(_act, _conv, _norm)) + + op1 = space.Categorical( + choices=( + conv_block, + space.Operation(operator="zero"), + space.Operation(operator="avg_pool"), + ), + ) + op2 = space.Categorical( + choices=( + conv_block, + space.Operation(operator="zero"), + space.Operation(operator="avg_pool"), + ), + ) + + _some_int = 2 + _some_float = space.Float(min_value=0.5, max_value=0.5) + + cell = space.Operation( + operator="cell", + args=(op1, op2, op1, op2, op1, op2), + kwargs={"float_hp": _some_float, "int_hp": _some_int}, + ) + + +@pytest.mark.repeat(50) +def test_nested_simple(): + pipeline = ActPipelineSimple() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + assert resolved_pipeline is not None + assert tuple(resolved_pipeline.get_attrs().keys()) == ("prelu", "relu", "act") + + assert resolved_pipeline.prelu is pipeline.prelu + assert resolved_pipeline.relu is pipeline.relu + + +@pytest.mark.repeat(50) +def test_nested_simple_string(): + possible_cell_config_strings = { + "(relu)", + "(prelu {'init': 0.1})", + } + + pipeline = ActPipelineSimple() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + act = resolved_pipeline.act + act_config_string = space.convert_operation_to_string(act) + assert act_config_string + assert act_config_string in 
possible_cell_config_strings + + +@pytest.mark.repeat(50) +def test_nested_complex(): + pipeline = ActPipelineComplex() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + assert resolved_pipeline is not None + assert tuple(resolved_pipeline.get_attrs().keys()) == ("prelu_init_value", "prelu", "act") + + prelu_init_value = resolved_pipeline.prelu_init_value + assert 0.1 <= prelu_init_value <= 0.9 + + prelu = resolved_pipeline.prelu + assert prelu.operator == "prelu" + assert isinstance(prelu.kwargs["init"], float) + assert prelu.kwargs["init"] is prelu_init_value + assert not prelu.args + + act = resolved_pipeline.act + assert act.operator == "prelu" + assert act is prelu + + +@pytest.mark.repeat(50) +def test_nested_complex_string(): + pipeline = ActPipelineComplex() + + resolved_pipeline, sampled_values = space.resolve(pipeline) + + act = resolved_pipeline.act + act_config_string = space.convert_operation_to_string(act) + assert act_config_string + + # expected to look like: "(prelu {'init': 0.1087727907176638})" + expected_prefix = "(prelu {'init': " + expected_ending = "})" + assert act_config_string.startswith(expected_prefix) + assert act_config_string.endswith(expected_ending) + assert 0.1 <= float(act_config_string[len(expected_prefix) : -len(expected_ending)]) <= 0.9 + + +def test_fixed_pipeline(): + pipeline = FixedPipeline() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + assert resolved_pipeline is not None + assert tuple(resolved_pipeline.get_attrs().keys()) == tuple(pipeline.get_attrs().keys()) + + assert resolved_pipeline.prelu_init_value == pipeline.prelu_init_value + assert resolved_pipeline.prelu is pipeline.prelu + assert resolved_pipeline.act is pipeline.act + assert resolved_pipeline is pipeline + + +def test_fixed_pipeline_string(): + pipeline = FixedPipeline() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + act = resolved_pipeline.act + act_config_string = 
space.convert_operation_to_string(act) + assert act_config_string + assert act_config_string == "(prelu {'init': 0.5})" + + +@pytest.mark.repeat(50) +def test_simple_reuse(): + pipeline = ConvPipeline() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + assert resolved_pipeline is not None + assert tuple(resolved_pipeline.get_attrs().keys()) == ( + "conv_choices_prior_index", + "conv_choices_prior_confidence", + "conv_choices", + "conv_block", + ) + + conv_choices_prior_index = resolved_pipeline.conv_choices_prior_index + assert conv_choices_prior_index == 0 or conv_choices_prior_index == 1 + + conv_choices_prior_confidence = resolved_pipeline.conv_choices_prior_confidence + assert conv_choices_prior_confidence in _conv_choices_prior_confidence_choices + + conv_choices = resolved_pipeline.conv_choices + assert conv_choices == _conv_choices_low or conv_choices == _conv_choices_high + + conv_block = resolved_pipeline.conv_block + assert conv_block.operator == "sequential3" + for conv in conv_block.args: + assert conv in conv_choices + assert conv_block.args[0] == conv_block.args[2] + + +@pytest.mark.repeat(50) +def test_simple_reuse_string(): + possible_conv_block_config_strings = { + "(sequential3 (conv1x1) (conv1x1) (conv1x1))", + "(sequential3 (conv1x1) (conv3x3) (conv1x1))", + "(sequential3 (conv3x3) (conv1x1) (conv3x3))", + "(sequential3 (conv3x3) (conv3x3) (conv3x3))", + "(sequential3 (conv5x5) (conv5x5) (conv5x5))", + "(sequential3 (conv5x5) (conv9x9) (conv5x5))", + "(sequential3 (conv9x9) (conv5x5) (conv9x9))", + "(sequential3 (conv9x9) (conv9x9) (conv9x9))", + } + + pipeline = ConvPipeline() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + conv_block = resolved_pipeline.conv_block + conv_block_config_string = space.convert_operation_to_string(conv_block) + assert conv_block_config_string + assert conv_block_config_string in possible_conv_block_config_strings + + +@pytest.mark.repeat(50) +def 
test_shared_complex(): + pipeline = CellPipeline() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + assert resolved_pipeline is not pipeline + assert resolved_pipeline is not None + assert tuple(resolved_pipeline.get_attrs().keys()) == ( + "conv_block", + "op1", + "op2", + "cell", + ) + + conv_block = resolved_pipeline.conv_block + assert conv_block is pipeline.conv_block + + op1 = resolved_pipeline.op1 + op2 = resolved_pipeline.op2 + assert op1 is not pipeline.op1 + assert op2 is not pipeline.op2 + assert isinstance(op1, space.Operation) + assert isinstance(op2, space.Operation) + + if op1 is op2: + assert op1 is conv_block + else: + assert op1.operator in {"zero", "avg_pool", "sequential3"} + assert op2.operator in {"zero", "avg_pool", "sequential3"} + if op1.operator == "sequential3" or op2.operator == "sequential3": + assert op1.operator != op2.operator + + cell = resolved_pipeline.cell + assert cell is not pipeline.cell + assert cell.operator == "cell" + assert cell.args[0] is op1 + assert cell.args[1] is op2 + assert cell.args[2] is op1 + assert cell.args[3] is op2 + assert cell.args[4] is op1 + assert cell.args[5] is op2 + assert len(cell.kwargs) == 2 + assert cell.kwargs["float_hp"] == 0.5 + assert cell.kwargs["int_hp"] == 2 + + +@pytest.mark.repeat(50) +def test_shared_complex_string(): + possible_cell_config_strings = { + "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (avg_pool) (avg_pool) (avg_pool) (avg_pool) (avg_pool))", + "(cell {'float_hp': 0.5, 'int_hp': 2} (zero) (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3 (relu) (conv3x3) (batch)))", + "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch)) (avg_pool) (sequential3 (relu) (conv3x3) (batch)) (avg_pool) (sequential3 (relu) (conv3x3) (batch)) (avg_pool))", + "(cell {'float_hp': 0.5, 'int_hp': 2} (zero) (zero) (zero) (zero) (zero) (zero))", + "(cell {'float_hp': 0.5, 'int_hp': 2} (zero) 
(avg_pool) (zero) (avg_pool) (zero) (avg_pool))", + "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)))", + "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (zero) (avg_pool) (zero) (avg_pool) (zero))", + "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3 (relu) (conv3x3) (batch)) (zero))", + "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (sequential3 (relu) (conv3x3) (batch)) (avg_pool) (sequential3 (relu) (conv3x3) (batch)) (avg_pool) (sequential3 (relu) (conv3x3) (batch)))", + } + + pipeline = CellPipeline() + + resolved_pipeline, _resolution_context = space.resolve(pipeline) + + cell = resolved_pipeline.cell + cell_config_string = space.convert_operation_to_string(cell) + assert cell_config_string + assert cell_config_string in possible_cell_config_strings + + +def test_shared_complex_context(): + # todo: move the context testing part to its own test file. + # This one should only do the reuse tests + + # todo: add a more complex test, where we have hidden Categorical choices. + # E.g. 
add Resampled along the way + + samplings_to_make = { + "Resolvable.op1::categorical__3": 2, + "Resolvable.op2::categorical__3": 1, + "Resolvable.cell.kwargs{float_hp}::float__0.5_0.5_False": 0.5, + } + + pipeline = CellPipeline() + + resolved_pipeline_first, _resolution_context_first = space.resolve( + pipeline=pipeline, + domain_sampler=space.OnlyPredefinedValuesSampler( + predefined_samplings=samplings_to_make, + ), + ) + sampled_values_first = _resolution_context_first.samplings_made + + assert resolved_pipeline_first is not pipeline + assert sampled_values_first is not None + assert sampled_values_first is not samplings_to_make + assert sampled_values_first == samplings_to_make + assert list(sampled_values_first.items()) == list(samplings_to_make.items()) + + resolved_pipeline_second, _resolution_context_second = space.resolve( + pipeline=pipeline, + domain_sampler=space.OnlyPredefinedValuesSampler( + predefined_samplings=samplings_to_make, + ), + ) + sampled_values_second = _resolution_context_second.samplings_made + + assert resolved_pipeline_second is not pipeline + assert resolved_pipeline_second is not None + assert sampled_values_second is not samplings_to_make + assert sampled_values_second == samplings_to_make + assert list(sampled_values_second.items()) == list(samplings_to_make.items()) + + # the second resolution should give us a new object + assert resolved_pipeline_second is not resolved_pipeline_first + + expected_config_string: str = ( + "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (zero) (avg_pool) (zero) (avg_pool) (zero))" + ) + + # however, their final results should be the same thing + assert space.convert_operation_to_string(resolved_pipeline_first.cell) == expected_config_string + assert space.convert_operation_to_string(resolved_pipeline_second.cell) == expected_config_string diff --git a/neps/space/new_space/tests/utils.py b/neps/space/new_space/tests/utils.py new file mode 100644 index 000000000..a548dfb64 --- /dev/null +++ 
b/neps/space/new_space/tests/utils.py @@ -0,0 +1,24 @@ +import pprint +from typing import Callable + +from neps.space.new_space import space + + +def generate_possible_config_strings( + pipeline: space.Pipeline, + resolved_pipeline_attr_getter: Callable[[space.Pipeline], space.Operation], + num_resolutions: int = 50_000, + display: bool = True, +): + result = set() + + for _ in range(num_resolutions): + resolved_pipeline, _resolution_context = space.resolve(pipeline) + attr = resolved_pipeline_attr_getter(resolved_pipeline) + config_string = space.convert_operation_to_string(attr) + result.add(config_string) + + if display: + pprint.pprint(result, indent=2) + + return result diff --git a/neps/space/parsing.py b/neps/space/parsing.py index 6c46e7b4a..64fabbec1 100644 --- a/neps/space/parsing.py +++ b/neps/space/parsing.py @@ -10,6 +10,7 @@ from typing import TYPE_CHECKING, Any, TypeAlias from neps.space.parameters import Categorical, Constant, Float, Integer, Parameter +from neps.space.new_space.space import Pipeline from neps.space.search_space import SearchSpace if TYPE_CHECKING: @@ -295,8 +296,9 @@ def convert_to_space( Mapping[str, dict | str | int | float | Parameter] | SearchSpace | ConfigurationSpace + | Pipeline ), -) -> SearchSpace: +) -> SearchSpace | Pipeline: """Converts a search space to a SearchSpace object. Args: @@ -319,6 +321,8 @@ def convert_to_space( return space case Mapping(): return convert_mapping(space) + case Pipeline(): + return space case _: raise ValueError( f"Unsupported type '{type(space)}' for conversion to SearchSpace." 
diff --git a/pyproject.toml b/pyproject.toml index cf1961d9c..bfe378795 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,6 +75,7 @@ dev = [ "mypy>=1,<2", "pytest>=7,<8", "pytest-cases>=3,<4", + "pytest-repeat>=0,<1", "types-PyYAML>=6,<7", "mkdocs-material", "mkdocs-autorefs", From 5bbbce8ae032c97a359d36595fdbe69f743764a6 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Fri, 18 Apr 2025 22:53:07 +0200 Subject: [PATCH 002/156] Add centering --- neps/space/new_space/space.py | 202 ++++++++++++- .../new_space/tests/test_domain__centering.py | 266 ++++++++++++++++++ 2 files changed, 467 insertions(+), 1 deletion(-) create mode 100644 neps/space/new_space/tests/test_domain__centering.py diff --git a/neps/space/new_space/space.py b/neps/space/new_space/space.py index b2dc58b59..24ed80d2d 100644 --- a/neps/space/new_space/space.py +++ b/neps/space/new_space/space.py @@ -19,6 +19,7 @@ Callable, Mapping, Generator, + Type, ) import neps.space.new_space.config_string as config_string @@ -110,6 +111,16 @@ class ConfidenceLevel(enum.Enum): class Domain(Resolvable, abc.ABC, Generic[T]): + @property + @abc.abstractmethod + def min_value(self) -> T: + raise NotImplementedError() + + @property + @abc.abstractmethod + def max_value(self) -> T: + raise NotImplementedError() + @property @abc.abstractmethod def has_prior(self) -> bool: @@ -134,6 +145,14 @@ def range_compatibility_identifier(self) -> str: def sample(self) -> T: raise NotImplementedError() + @abc.abstractmethod + def centered_around( + self, + center: T, + confidence: ConfidenceLevel, + ) -> Domain[T]: + raise NotImplementedError() + def get_attrs(self) -> Mapping[str, Any]: return {k.lstrip("_"): v for k, v in vars(self).items()} @@ -141,6 +160,46 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Domain[T]: return type(self)(**attrs) +def _calculate_new_domain_bounds( + number_type: Type[int] | Type[float], + min_value: int | float, + max_value: int | float, + 
center: int | float, + confidence: ConfidenceLevel, +) -> tuple[int, int] | tuple[float, float]: + if center < min_value or center > max_value: + raise ValueError(f"Center value {center!r} must be within domain range [{min_value!r}, {max_value!r}]") + + # Determine a chunk size by splitting the domain range into a fixed number of chunks. + # Then use the confidence level to decide how many chunks to include + # around the given center (on each side). + + number_of_chunks = 10.0 + chunk_size = (max_value - min_value) / number_of_chunks + + # The numbers refer to how many segments to have on each side of the center. + confidence_to_number_of_chunks_on_each_side = { + ConfidenceLevel.HIGH: 1.0, + ConfidenceLevel.MEDIUM: 2.5, + ConfidenceLevel.LOW: 4.0, + } + + chunk_multiplier = confidence_to_number_of_chunks_on_each_side[confidence] + interval_radius = chunk_size * chunk_multiplier + + if number_type is int: + # In this case we need to use ceil/floor so that we end up with ints. + new_min = max(min_value, math.floor(center - interval_radius)) + new_max = min(max_value, math.ceil(center + interval_radius)) + elif number_type is float: + new_min = max(min_value, center - interval_radius) + new_max = min(max_value, center + interval_radius) + else: + raise ValueError(f"Unsupported number type {number_type!r}.") + + return new_min, new_max + + class Categorical(Domain[int], Generic[T]): def __init__( self, @@ -156,6 +215,14 @@ def __init__( self._prior_index = prior_index self._prior_confidence = prior_confidence + @property + def min_value(self) -> int: + return 0 + + @property + def max_value(self) -> int: + return max(len(cast(tuple, self._choices)) - 1, 0) + @property def choices(self) -> tuple[T | Domain[T] | Resolvable, ...] 
| Domain[T]: return self._choices @@ -183,6 +250,28 @@ def range_compatibility_identifier(self) -> str: def sample(self) -> int: return int(random.randint(0, len(cast(tuple[T], self._choices)) - 1)) + def centered_around( + self, + center: int, + confidence: ConfidenceLevel, + ) -> Categorical: + new_min, new_max = cast( + tuple[int, int], + _calculate_new_domain_bounds( + number_type=int, + min_value=self.min_value, + max_value=self.max_value, + center=center, + confidence=confidence, + ), + ) + new_choices = cast(tuple, self._choices)[new_min : new_max + 1] + return Categorical( + choices=new_choices, + prior_index=new_choices.index(cast(tuple, self._choices)[center]), + prior_confidence=confidence, + ) + class Float(Domain[float]): def __init__( @@ -234,6 +323,26 @@ def sample(self) -> float: return float(math.exp(random.uniform(log_min, log_max))) return float(random.uniform(self._min_value, self._max_value)) + def centered_around( + self, + center: float, + confidence: ConfidenceLevel, + ) -> Float: + new_min, new_max = _calculate_new_domain_bounds( + number_type=float, + min_value=self.min_value, + max_value=self.max_value, + center=center, + confidence=confidence, + ) + return Float( + min_value=new_min, + max_value=new_max, + log=self._log, + prior=center, + prior_confidence=confidence, + ) + class Integer(Domain[int]): def __init__( @@ -283,6 +392,29 @@ def sample(self) -> int: raise NotImplementedError("TODO.") return int(random.randint(self._min_value, self._max_value)) + def centered_around( + self, + center: int, + confidence: ConfidenceLevel, + ) -> Integer: + new_min, new_max = cast( + tuple[int, int], + _calculate_new_domain_bounds( + number_type=int, + min_value=self.min_value, + max_value=self.max_value, + center=center, + confidence=confidence, + ), + ) + return Integer( + min_value=new_min, + max_value=new_max, + log=self._log, + prior=center, + prior_confidence=confidence, + ) + class Operation(Resolvable): def __init__( @@ -487,6 +619,48 @@ 
def __call__( return self._random_sampler(domain_obj=domain_obj, current_path=current_path) +class MutatateUsingCentersSampler(DomainSampler): + def __init__( + self, + predefined_samplings: Mapping[str, Any], + n_mutations: int, + ): + if not isinstance(n_mutations, int) or n_mutations <= 0 or n_mutations > len(predefined_samplings): + raise ValueError(f"Invalid value for `n_mutations`: {n_mutations!r}.") + + self._kept_samplings_to_make = _mutate_samplings_to_make_by_forgetting( + samplings_to_make=predefined_samplings, + n_forgets=n_mutations, + ) + + # Still remember the original choices. We'll use them as centers later. + self._original_samplings_to_make = predefined_samplings + + def __call__( + self, + *, + domain_obj: Domain[T], + current_path: str, + ) -> T: + if current_path not in self._kept_samplings_to_make: + # For this path we either have forgotten the value or we never had it. + if current_path in self._original_samplings_to_make: + # If we had a value for this path originally, use it as a center. + original_value = self._original_samplings_to_make[current_path] + sampled_value = domain_obj.centered_around( + center=original_value, + confidence=ConfidenceLevel.HIGH, + ).sample() + else: + # We never had a value for this path, we can only sample from the domain. + sampled_value = domain_obj.sample() + else: + # For this path we have chosen to keep the original value. + sampled_value = cast(T, self._kept_samplings_to_make[current_path]) + + return sampled_value + + class CrossoverNotPossibleError(Exception): pass @@ -1133,6 +1307,32 @@ def __call__( # Do some mutations. for top_trial in top_trials: top_trial_config = top_trial.config + + # Mutate by resampling around some values of the original config. 
+ mutated_incumbents += [ + resolve( + pipeline=self._pipeline, + domain_sampler=MutatateUsingCentersSampler( + predefined_samplings=top_trial_config, + n_mutations=1, + ), + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + mutated_incumbents += [ + resolve( + pipeline=self._pipeline, + domain_sampler=MutatateUsingCentersSampler( + predefined_samplings=top_trial_config, + n_mutations=random.randint(1, int(len(top_trial_config) / 2)), + ), + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + + # Mutate by completely forgetting some values of the original config. mutated_incumbents += [ resolve( pipeline=self._pipeline, @@ -1149,7 +1349,7 @@ def __call__( pipeline=self._pipeline, domain_sampler=MutateByForgettingSampler( predefined_samplings=top_trial_config, - n_forgets=random.randint(1, len(top_trial_config)), + n_forgets=random.randint(1, int(len(top_trial_config) / 2)), ), environment_values=self._environment_values, ) diff --git a/neps/space/new_space/tests/test_domain__centering.py b/neps/space/new_space/tests/test_domain__centering.py new file mode 100644 index 000000000..b04769233 --- /dev/null +++ b/neps/space/new_space/tests/test_domain__centering.py @@ -0,0 +1,266 @@ +import pytest + +import neps.space.new_space.space as space + + +@pytest.mark.parametrize( + ["confidence_level", "expected_prior_min_max"], + [ + (space.ConfidenceLevel.LOW, (50, 10, 90)), + (space.ConfidenceLevel.MEDIUM, (50, 25, 75)), + (space.ConfidenceLevel.HIGH, (50, 40, 60)), + ], +) +def test_centering_integer( + confidence_level, + expected_prior_min_max, +): + # Construct domains manually and then with priors. + # They are constructed in a way that after centering they both + # refer to identical domain ranges. 
+ + int_prior = 50 + + int1 = space.Integer( + min_value=1, + max_value=100, + ) + int2 = space.Integer( + min_value=1, + max_value=100, + prior=int_prior, + prior_confidence=confidence_level, + ) + + int1_centered = int1.centered_around(int_prior, confidence_level) + int2_centered = int2.centered_around(int2.prior, int2.prior_confidence) + + assert int_prior == expected_prior_min_max[0] + assert ( + ( + int1_centered.prior, + int1_centered.min_value, + int1_centered.max_value, + ) + == ( + int2_centered.prior, + int2_centered.min_value, + int2_centered.max_value, + ) + == expected_prior_min_max + ) + + int1_centered.sample() + int2_centered.sample() + + +@pytest.mark.parametrize( + ["confidence_level", "expected_prior_min_max"], + [ + (space.ConfidenceLevel.LOW, (50.0, 10.399999999999999, 89.6)), + (space.ConfidenceLevel.MEDIUM, (50.0, 25.25, 74.75)), + (space.ConfidenceLevel.HIGH, (50.0, 40.1, 59.9)), + ], +) +def test_centering_float( + confidence_level, + expected_prior_min_max, +): + # Construct domains manually and then with priors. + # They are constructed in a way that after centering they both + # refer to identical domain ranges. 
+ + float_prior = 50.0 + + float1 = space.Float( + min_value=1.0, + max_value=100.0, + ) + float2 = space.Float( + min_value=1.0, + max_value=100.0, + prior=float_prior, + prior_confidence=confidence_level, + ) + + float1_centered = float1.centered_around(float_prior, confidence_level) + float2_centered = float2.centered_around(float2.prior, float2.prior_confidence) + + assert float_prior == expected_prior_min_max[0] + assert ( + ( + float1_centered.prior, + float1_centered.min_value, + float1_centered.max_value, + ) + == ( + float2_centered.prior, + float2_centered.min_value, + float2_centered.max_value, + ) + == expected_prior_min_max + ) + + float1_centered.sample() + float2_centered.sample() + + +@pytest.mark.parametrize( + ["confidence_level", "expected_prior_min_max_value"], + [ + (space.ConfidenceLevel.LOW, (40, 0, 80, 50)), + (space.ConfidenceLevel.MEDIUM, (25, 0, 50, 50)), + (space.ConfidenceLevel.HIGH, (10, 0, 20, 50)), + ], +) +def test_centering_categorical( + confidence_level, + expected_prior_min_max_value, +): + # Construct domains manually and then with priors. + # They are constructed in a way that after centering they both + # refer to identical domain ranges. + + categorical_prior_index_original = 49 + + categorical1 = space.Categorical( + choices=tuple(range(1, 101)), + ) + categorical2 = space.Categorical( + choices=tuple(range(1, 101)), + prior_index=categorical_prior_index_original, + prior_confidence=confidence_level, + ) + + categorical1_centered = categorical1.centered_around(categorical_prior_index_original, confidence_level) + categorical2_centered = categorical2.centered_around(categorical2.prior, categorical2.prior_confidence) + + # During the centering of categorical objects, the prior index will change. 
+ assert categorical_prior_index_original != expected_prior_min_max_value[0] + + assert ( + ( + categorical1_centered.prior, + categorical1_centered.min_value, + categorical1_centered.max_value, + categorical1_centered.choices[categorical1_centered.prior], + ) + == ( + categorical2_centered.prior, + categorical2_centered.min_value, + categorical2_centered.max_value, + categorical2_centered.choices[categorical2_centered.prior], + ) + == expected_prior_min_max_value + ) + + categorical1_centered.sample() + categorical2_centered.sample() + + +@pytest.mark.parametrize( + ["confidence_level", "expected_prior_min_max"], + [ + (space.ConfidenceLevel.LOW, (10, 5, 13)), + (space.ConfidenceLevel.MEDIUM, (10, 7, 13)), + (space.ConfidenceLevel.HIGH, (10, 8, 12)), + ], +) +def test_centering_stranger_ranges_integer( + confidence_level, + expected_prior_min_max, +): + int1 = space.Integer( + min_value=1, + max_value=13, + ) + int1_centered = int1.centered_around(10, confidence_level) + + int2 = space.Integer( + min_value=1, + max_value=13, + prior=10, + prior_confidence=confidence_level, + ) + int2_centered = int2.centered_around(int2.prior, int2.prior_confidence) + + assert (int1_centered.prior, int1_centered.min_value, int1_centered.max_value) == expected_prior_min_max + assert (int2_centered.prior, int2_centered.min_value, int2_centered.max_value) == expected_prior_min_max + + int1_centered.sample() + int2_centered.sample() + + +@pytest.mark.parametrize( + ["confidence_level", "expected_prior_min_max"], + [ + (space.ConfidenceLevel.LOW, (0.5, 0.09999999999999998, 0.9)), + (space.ConfidenceLevel.MEDIUM, (0.5, 0.25, 0.75)), + (space.ConfidenceLevel.HIGH, (0.5, 0.4, 0.6)), + ], +) +def test_centering_stranger_ranges_float( + confidence_level, + expected_prior_min_max, +): + float1 = space.Float( + min_value=0.0, + max_value=1.0, + ) + float1_centered = float1.centered_around(0.5, confidence_level) + + float2 = space.Float( + min_value=0.0, + max_value=1.0, + prior=0.5, + 
prior_confidence=confidence_level, + ) + float2_centered = float2.centered_around(float2.prior, float2.prior_confidence) + + assert (float1_centered.prior, float1_centered.min_value, float1_centered.max_value) == expected_prior_min_max + assert (float2_centered.prior, float2_centered.min_value, float2_centered.max_value) == expected_prior_min_max + + float1_centered.sample() + float2_centered.sample() + + +@pytest.mark.parametrize( + ["confidence_level", "expected_prior_min_max_value"], + [ + (space.ConfidenceLevel.LOW, (2, 0, 5, 2)), + (space.ConfidenceLevel.MEDIUM, (2, 0, 4, 2)), + (space.ConfidenceLevel.HIGH, (1, 0, 2, 2)), + ], +) +def test_centering_stranger_ranges_categorical( + confidence_level, + expected_prior_min_max_value, +): + categorical1 = space.Categorical( + choices=tuple(range(0, 7)), + ) + categorical1_centered = categorical1.centered_around(2, confidence_level) + + categorical2 = space.Categorical( + choices=tuple(range(0, 7)), + prior_index=2, + prior_confidence=confidence_level, + ) + categorical2_centered = categorical2.centered_around(categorical2.prior, categorical2.prior_confidence) + + assert ( + categorical1_centered.prior, + categorical1_centered.min_value, + categorical1_centered.max_value, + categorical1_centered.choices[categorical1_centered.prior], + ) == expected_prior_min_max_value + + assert ( + categorical2_centered.prior, + categorical2_centered.min_value, + categorical2_centered.max_value, + categorical2_centered.choices[categorical2_centered.prior], + ) == expected_prior_min_max_value + + categorical1_centered.sample() + categorical2_centered.sample() From a6b10ff2efb36c2976b25e07412224dc56bf5d0b Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Sun, 20 Apr 2025 02:11:44 +0200 Subject: [PATCH 003/156] Add bracket optimizers --- neps/space/new_space/bracket_optimizer.py | 297 ++++++++++++++++++ neps/space/new_space/priorband.py | 184 +++++++++++ neps/space/new_space/space.py | 15 +- 
...st_neps_integration_priorband__max_cost.py | 182 +++++++++++ ...t_neps_integration_priorband__max_evals.py | 164 ++++++++++ 5 files changed, 835 insertions(+), 7 deletions(-) create mode 100644 neps/space/new_space/bracket_optimizer.py create mode 100644 neps/space/new_space/priorband.py create mode 100644 neps/space/new_space/tests/test_neps_integration_priorband__max_cost.py create mode 100644 neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py diff --git a/neps/space/new_space/bracket_optimizer.py b/neps/space/new_space/bracket_optimizer.py new file mode 100644 index 000000000..6c0508400 --- /dev/null +++ b/neps/space/new_space/bracket_optimizer.py @@ -0,0 +1,297 @@ +from __future__ import annotations + +import logging +from collections.abc import Callable, Mapping, Sequence +from dataclasses import dataclass +from functools import partial +from typing import TYPE_CHECKING, Literal, Any + +import pandas as pd + +from neps.optimizers.optimizer import SampledConfig +from neps.space.new_space.priorband import PriorBandSampler +from neps.optimizers.utils.brackets import PromoteAction, SampleAction +import neps.space.new_space.space as new_space +import neps.optimizers.bracket_optimizer as standard_bracket_optimizer + +if TYPE_CHECKING: + from neps.optimizers.utils.brackets import Bracket + from neps.state.optimizer import BudgetInfo + from neps.state.trial import Trial + + +logger = logging.getLogger(__name__) + + +@dataclass +class _BracketOptimizer: + """The pipeline space to optimize over.""" + + space: new_space.Pipeline + + """Whether or not to sample the prior first. + + If set to `"highest_fidelity"`, the prior will be sampled at the highest fidelity, + otherwise at the lowest fidelity. 
+ """ + sample_prior_first: bool | Literal["highest_fidelity"] + + """The eta parameter for the algorithm.""" + eta: int + + """The mapping from rung to fidelity value.""" + rung_to_fid: Mapping[int, int | float] + + """A function that creates the brackets from the table of trials.""" + create_brackets: Callable[[pd.DataFrame], Sequence[Bracket] | Bracket] + + """The sampler used to generate new trials.""" + sampler: PriorBandSampler + + def __call__( # noqa: C901, PLR0912 + self, + trials: Mapping[str, Trial], + budget_info: BudgetInfo | None, + n: int | None = None, + ) -> SampledConfig | list[SampledConfig]: + assert n is None, "TODO" + + # If we have no trials, we either go with the prior or just a sampled config + if len(trials) == 0: + match self.sample_prior_first: + case "highest_fidelity": # fid_max + config = self._sample_prior(fidelity_level="max") + rung = max(self.rung_to_fid) + return SampledConfig(id=f"1_{rung}", config=config) + case True: # fid_min + config = self._sample_prior(fidelity_level="min") + rung = min(self.rung_to_fid) + return SampledConfig(id=f"1_{rung}", config=config) + case False: + pass + + table = standard_bracket_optimizer.trials_to_table(trials=trials) + + if len(table) == 0: # noqa: SIM108 + # Nothing there, this sample will be the first + nxt_id = 1 + else: + # One plus the maximum current id in the table index + nxt_id = table.index.get_level_values("id").max() + 1 # type: ignore + + # We don't want the first highest fidelity sample ending + # up in a bracket + if self.sample_prior_first == "highest_fidelity": + table = table.iloc[1:] + + # Get and execute the next action from our brackets that are not pending or done + assert isinstance(table, pd.DataFrame) + brackets = self.create_brackets(table) + + if not isinstance(brackets, Sequence): + brackets = [brackets] + + next_action = next( + (action for bracket in brackets if (action := bracket.next()) not in ("done", "pending")), + None, + ) + + if next_action is None: + raise 
RuntimeError( + f"{self.__class__.__name__} never got a 'sample' or 'promote' action!" + f" This likely means the implementation of {self.create_brackets}" + " is incorrect and should have provided enough brackets, where at" + " least one of them should have requested another sample." + f"\nBrackets:\n{brackets}" + ) + + match next_action: + # The bracket would like us to promote a configuration + case PromoteAction(config=config, id=config_id, new_rung=new_rung): + config = self._convert_to_another_rung(config=config, rung=new_rung) + return SampledConfig( + id=f"{config_id}_{new_rung}", + config=config, + previous_config_id=f"{config_id}_{new_rung - 1}", + ) + + # We need to sample for a new rung. + case SampleAction(rung=rung): + config = self.sampler.sample_config(table, rung=rung) + config = self._convert_to_another_rung(config=config, rung=rung) + return SampledConfig( + id=f"{nxt_id}_{rung}", + config=config, + ) + + case _: + raise RuntimeError(f"Unknown bracket action: {next_action}") + + def _sample_prior( + self, + fidelity_level: Literal["min"] | Literal["max"], + ) -> dict[str, Any]: + # TODO: [lum] have a CenterSampler as fallback, not Random + _try_always_priors_sampler = new_space.PriorOrFallbackSampler( + fallback_sampler=new_space.RandomSampler(predefined_samplings={}), + prior_use_probability=1, + ) + + _environment_values = {} + _fidelity_attrs = self.space.fidelity_attrs + for fidelity_name, fidelity_obj in _fidelity_attrs.items(): + if fidelity_level == "max": + _environment_values[fidelity_name] = fidelity_obj.max_value + elif fidelity_level == "min": + _environment_values[fidelity_name] = fidelity_obj.min_value + else: + raise ValueError(f"Invalid fidelity level {fidelity_level}") + + _resolved_pipeline, resolution_context = new_space.resolve( + pipeline=self.space, + domain_sampler=_try_always_priors_sampler, + environment_values=_environment_values, + ) + + config = new_space.NepsCompatConverter.to_neps_config(resolution_context) + return 
dict(**config) + + def _convert_to_another_rung( + self, + config: dict[str, Any], + rung: int, + ) -> dict[str, Any]: + data = new_space.NepsCompatConverter.from_neps_config(config=config) + + _environment_values = {} + _fidelity_attrs = self.space.fidelity_attrs + assert len(_fidelity_attrs) == 1, "TODO: [lum]" + for fidelity_name, fidelity_obj in _fidelity_attrs.items(): + _environment_values[fidelity_name] = self.rung_to_fid[rung] + + _resolved_pipeline, resolution_context = new_space.resolve( + pipeline=self.space, + domain_sampler=new_space.OnlyPredefinedValuesSampler( + predefined_samplings=data.predefined_samplings, + ), + environment_values=_environment_values, + ) + + config = new_space.NepsCompatConverter.to_neps_config(resolution_context) + return dict(**config) + + +def priorband( + space: new_space.Pipeline, + *, + eta: int = 3, + sample_prior_first: bool | Literal["highest_fidelity"] = False, + base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", +) -> _BracketOptimizer: + return _bracket_optimizer( + pipeline_space=space, + bracket_type=base, + eta=eta, + sampler="priorband", + sample_prior_first=sample_prior_first, + early_stopping_rate=0 if base in ("successive_halving", "asha") else None, + ) + + +def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 + pipeline_space: new_space.Pipeline, + *, + bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], + eta: int, + sampler: Literal["priorband"], + sample_prior_first: bool | Literal["highest_fidelity"], + early_stopping_rate: int | None, +) -> _BracketOptimizer: + + fidelity_attrs = pipeline_space.fidelity_attrs + + if len(fidelity_attrs) != 1: + raise ValueError("Only one fidelity should be defined in the pipeline space." 
f"\nGot: {fidelity_attrs!r}") + + fidelity_name, fidelity_obj = list(fidelity_attrs.items())[0] + + if sample_prior_first not in (True, False, "highest_fidelity"): + raise ValueError("sample_prior_first should be either True, False or 'highest_fidelity'") + + from neps.optimizers.utils import brackets + + # Determine the strategy for creating brackets for sampling + create_brackets: Callable[[pd.DataFrame], Sequence[Bracket] | Bracket] + match bracket_type: + case "successive_halving": + assert early_stopping_rate is not None + rung_to_fidelity, rung_sizes = brackets.calculate_sh_rungs( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + early_stopping_rate=early_stopping_rate, + ) + create_brackets = partial( + brackets.Sync.create_repeating, + rung_sizes=rung_sizes, + ) + + case "hyperband": + assert early_stopping_rate is None + rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + ) + create_brackets = partial( + brackets.Hyperband.create_repeating, + bracket_layouts=bracket_layouts, + ) + + case "asha": + assert early_stopping_rate is not None + rung_to_fidelity, _rung_sizes = brackets.calculate_sh_rungs( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + early_stopping_rate=early_stopping_rate, + ) + create_brackets = partial( + brackets.Async.create, + rungs=list(rung_to_fidelity), + eta=eta, + ) + + case "async_hb": + assert early_stopping_rate is None + rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + ) + # We don't care about the capacity of each bracket, we need the rung layout + bracket_rungs = [list(bracket.keys()) for bracket in bracket_layouts] + create_brackets = partial( + brackets.AsyncHyperband.create, + bracket_rungs=bracket_rungs, + eta=eta, + ) + case _: + raise ValueError(f"Unknown bracket type: {bracket_type}") + 
+ _sampler: PriorBandSampler + match sampler: + case "priorband": + _sampler = PriorBandSampler( + space=pipeline_space, + eta=eta, + early_stopping_rate=(early_stopping_rate if early_stopping_rate is not None else 0), + fid_bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + ) + case _: + raise ValueError(f"Unknown sampler: {sampler}") + + return _BracketOptimizer( + space=pipeline_space, + eta=eta, + rung_to_fid=rung_to_fidelity, + sampler=_sampler, + sample_prior_first=sample_prior_first, + create_brackets=create_brackets, + ) diff --git a/neps/space/new_space/priorband.py b/neps/space/new_space/priorband.py new file mode 100644 index 000000000..3d74a6258 --- /dev/null +++ b/neps/space/new_space/priorband.py @@ -0,0 +1,184 @@ +from __future__ import annotations + +import random +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any + +import numpy as np + +from neps.optimizers.utils import brackets +import neps.space.new_space.space as new_space + +if TYPE_CHECKING: + import pandas as pd + + +@dataclass +class PriorBandSampler: + """Implement a sampler based on PriorBand""" + + """The pipeline space to optimize over.""" + space: new_space.Pipeline + + """The eta value to use for the SH bracket.""" + eta: int + + """The early stopping rate to use for the SH bracket.""" + early_stopping_rate: int + + """The fidelity bounds.""" + fid_bounds: tuple[int, int] | tuple[float, float] + + def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: + rung_to_fid, rung_sizes = brackets.calculate_sh_rungs( + bounds=self.fid_bounds, + eta=self.eta, + early_stopping_rate=self.early_stopping_rate, + ) + max_rung = max(rung_sizes) + + # Below we will follow the "geometric" spacing + w_random = 1 / (1 + self.eta**rung) + w_prior = 1 - w_random + + completed: pd.DataFrame = table[table["perf"].notna()] # type: ignore + + # To see if we activate incumbent sampling, we check: + # 1) We have at least one fully complete run + # 2) We have 
spent at least one full SH bracket worth of fidelity + # 3) There is at least one rung with eta evaluations to get the top 1/eta configs + completed_rungs = completed.index.get_level_values("rung") + one_complete_run_at_max_rung = (completed_rungs == max_rung).any() + + # For SH bracket cost, we include the fact we can continue runs, + # i.e. resources for rung 2 discounts the cost of evaluating to rung 1, + # only counting the difference in fidelity cost between rung 2 and rung 1. + cost_per_rung = {i: rung_to_fid[i] - rung_to_fid.get(i - 1, 0) for i in rung_to_fid} + + cost_of_one_sh_bracket = sum(rung_sizes[r] * cost_per_rung[r] for r in rung_sizes) + current_cost_used = sum(r * cost_per_rung[r] for r in completed_rungs) + spent_one_sh_bracket_worth_of_fidelity = current_cost_used >= cost_of_one_sh_bracket + + # Check that there is at least rung with `eta` evaluations + rung_counts = completed.groupby("rung").size() + any_rung_with_eta_evals = (rung_counts == self.eta).any() + + # If the conditions are not met, we sample from the prior or randomly depending on + # the geometrically distributed prior and uniform weights + if ( + one_complete_run_at_max_rung is False + or spent_one_sh_bracket_worth_of_fidelity is False + or any_rung_with_eta_evals is False + ): + policy = np.random.choice(["prior", "random"], p=[w_prior, w_random]) + match policy: + case "prior": + return self._sample_prior() + case "random": + return self._sample_random() + case _: + raise RuntimeError(f"Unknown policy: {policy}") + + # Otherwise, we now further split the `prior` weight into `(prior, inc)` + + # 1. 
Select the top `1//eta` percent of configs at the highest rung supporting it + rungs_with_at_least_eta = rung_counts[rung_counts >= self.eta].index # type: ignore + rung_table: pd.DataFrame = completed[ # type: ignore + completed.index.get_level_values("rung") == rungs_with_at_least_eta.max() + ] + + K = len(rung_table) // self.eta + top_k_configs = rung_table.nsmallest(K, columns=["perf"])["config"].tolist() + + # 2. Get the global incumbent, and build a prior distribution around it + inc_config = completed.loc[completed["perf"].idxmin()]["config"] + + # 3. Calculate a ratio score of how likely each of the top K configs are under + # the prior and inc distribution, weighing them by their position in the top K + # weights = torch.arange(K, 0, -1) + + # top_k_pdf_inc = inc_dist.pdf_configs(top_k_configs, frm=self.encoder) # type: ignore + # top_k_pdf_prior = prior_dist.pdf_configs(top_k_configs, frm=self.encoder) # type: ignore + + # unnormalized_inc_score = (weights * top_k_pdf_inc).sum() + # unnormalized_prior_score = (weights * top_k_pdf_prior).sum() + # total_score = unnormalized_inc_score + unnormalized_prior_score + + # inc_ratio = float(unnormalized_inc_score / total_score) + # prior_ratio = float(unnormalized_prior_score / total_score) + + # TODO: [lum]: Here I am simply using fixed values. + # Will maybe have to come up with a way to approximate the pdf for the top configs. + inc_ratio = float(0.75) + prior_ratio = float(0.25) + + # 4. 
And finally, we distribute the original w_prior according to this ratio + w_inc = w_prior * inc_ratio + w_prior = w_prior * prior_ratio + assert np.isclose(w_prior + w_inc + w_random, 1.0) + + # Now we use these weights to choose which sampling distribution to sample from + policy = np.random.choice( + ["prior", "inc", "random"], + p=[w_prior, w_inc, w_random], + ) + match policy: + case "prior": + return self._sample_prior() + case "random": + return self._sample_random() + case "inc": + assert inc_config is not None + return self._mutate_inc(inc_config) + raise RuntimeError(f"Unknown policy: {policy}") + + def _sample_prior(self) -> dict[str, Any]: + # TODO: [lum] have a CenterSampler as fallback, not Random + _try_always_priors_sampler = new_space.PriorOrFallbackSampler( + fallback_sampler=new_space.RandomSampler(predefined_samplings={}), + prior_use_probability=1, + ) + + _environment_values = {} + _fidelity_attrs = self.space.fidelity_attrs + for fidelity_name, fidelity_obj in _fidelity_attrs.items(): + _environment_values[fidelity_name] = fidelity_obj.max_value + + _resolved_pipeline, resolution_context = new_space.resolve( + pipeline=self.space, + domain_sampler=_try_always_priors_sampler, + environment_values=_environment_values, + ) + + config = new_space.NepsCompatConverter.to_neps_config(resolution_context) + return dict(**config) + + def _sample_random(self) -> dict[str, Any]: + _environment_values = {} + _fidelity_attrs = self.space.fidelity_attrs + for fidelity_name, fidelity_obj in _fidelity_attrs.items(): + _environment_values[fidelity_name] = fidelity_obj.max_value + + _resolved_pipeline, resolution_context = new_space.resolve( + pipeline=self.space, + domain_sampler=new_space.RandomSampler(predefined_samplings={}), + environment_values=_environment_values, + ) + + config = new_space.NepsCompatConverter.to_neps_config(resolution_context) + return dict(**config) + + def _mutate_inc(self, inc_config: dict[str, Any]) -> dict[str, Any]: + data = 
new_space.NepsCompatConverter.from_neps_config(config=inc_config) + + _resolved_pipeline, resolution_context = new_space.resolve( + pipeline=self.space, + domain_sampler=new_space.MutatateUsingCentersSampler( + predefined_samplings=data.predefined_samplings, + n_mutations=max(1, random.randint(1, int(len(inc_config) / 2))), + ), + environment_values=data.environment_values, + ) + + config = new_space.NepsCompatConverter.to_neps_config(resolution_context) + return dict(**config) diff --git a/neps/space/new_space/space.py b/neps/space/new_space/space.py index 24ed80d2d..f98fe983c 100644 --- a/neps/space/new_space/space.py +++ b/neps/space/new_space/space.py @@ -1325,7 +1325,7 @@ def __call__( pipeline=self._pipeline, domain_sampler=MutatateUsingCentersSampler( predefined_samplings=top_trial_config, - n_mutations=random.randint(1, int(len(top_trial_config) / 2)), + n_mutations=max(1, random.randint(1, int(len(top_trial_config) / 2))), ), environment_values=self._environment_values, ) @@ -1349,7 +1349,7 @@ def __call__( pipeline=self._pipeline, domain_sampler=MutateByForgettingSampler( predefined_samplings=top_trial_config, - n_forgets=random.randint(1, int(len(top_trial_config) / 2)), + n_forgets=max(1, random.randint(1, int(len(top_trial_config) / 2))), ), environment_values=self._environment_values, ) @@ -1423,7 +1423,7 @@ def __call__( # ------------------------------------------------- -class _NepsCompatConverter: +class NepsCompatConverter: _SAMPLING_PREFIX = "SAMPLING__" _ENVIRONMENT_PREFIX = "ENVIRONMENT__" _SAMPLING_PREFIX_LEN = len(_SAMPLING_PREFIX) @@ -1485,7 +1485,7 @@ def _prepare_sampled_configs( ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: configs = [] for i, (_resolved_pipeline, resolution_context) in enumerate(chosen_pipelines): - neps_config = _NepsCompatConverter.to_neps_config( + neps_config = NepsCompatConverter.to_neps_config( resolution_context=resolution_context, ) @@ -1513,7 +1513,7 @@ def inner(*args: Any, **kwargs: Any) -> 
Any: # the samplings to make or to environment values. # That is not an issue. Those items will be passed through. - sampled_pipeline_data = _NepsCompatConverter.from_neps_config(config=kwargs) + sampled_pipeline_data = NepsCompatConverter.from_neps_config(config=kwargs) sampled_pipeline, _resolution_context = resolve( pipeline=pipeline_space, @@ -1524,12 +1524,13 @@ def inner(*args: Any, **kwargs: Any) -> Any: ) config = dict(**sampled_pipeline.get_attrs()) + for name, value in config.items(): if isinstance(value, Operation): config[name] = operation_converter(value) - # So that we still pass the kwargs not related to the config. - # Take away all the kwargs which were related to samplings made. + # So that we still pass the kwargs not related to the config, + # start with the extra kwargs we passed to the converter. new_kwargs = dict(**sampled_pipeline_data.extra_kwargs) # Then add all the kwargs from the config. new_kwargs.update(config) diff --git a/neps/space/new_space/tests/test_neps_integration_priorband__max_cost.py b/neps/space/new_space/tests/test_neps_integration_priorband__max_cost.py new file mode 100644 index 000000000..9053b5bab --- /dev/null +++ b/neps/space/new_space/tests/test_neps_integration_priorband__max_cost.py @@ -0,0 +1,182 @@ +from functools import partial + +import numpy as np +import pytest + +import neps +import neps.space.new_space.space as space +import neps.space.new_space.bracket_optimizer as new_bracket_optimizer +import neps.optimizers.algorithms as old_algorithms + + +_COSTS = {} + + +def evaluate_pipeline(float1, float2, integer1, fidelity): + objective_to_minimize = -float(np.sum([float1, float2, integer1])) * fidelity + + key = (float1, float2, integer1) + old_cost = _COSTS.get(key, 0) + added_cost = fidelity - old_cost + + _COSTS[key] = fidelity + + return { + "objective_to_minimize": objective_to_minimize, + "cost": added_cost, + } + + +old_pipeline_space = dict( + float1=neps.Float( + lower=1, + upper=1000, + log=False, + 
prior=600, + prior_confidence="medium", + ), + float2=neps.Float( + lower=-100, + upper=100, + prior=0, + prior_confidence="medium", + ), + integer1=neps.Integer( + lower=0, + upper=500, + prior=35, + prior_confidence="low", + ), + fidelity=neps.Integer( + lower=1, + upper=100, + is_fidelity=True, + ), +) + + +class DemoHyperparameterWithFidelitySpace(space.Pipeline): + float1 = space.Float( + min_value=1, + max_value=1000, + log=False, + prior=600, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + float2 = space.Float( + min_value=-100, + max_value=100, + prior=0, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + integer1 = space.Integer( + min_value=0, + max_value=500, + prior=35, + prior_confidence=space.ConfidenceLevel.LOW, + ) + fidelity = space.Fidelity( + domain=space.Integer( + min_value=1, + max_value=100, + ), + ) + + +@pytest.mark.parametrize( + ["optimizer", "optimizer_name"], + [ + ( + space.RandomSearch, + "new__RandomSearch", + ), + ( + space.ComplexRandomSearch, + "new__ComplexRandomSearch", + ), + ( + partial(new_bracket_optimizer.priorband, base="successive_halving"), + "new__priorband+successive_halving", + ), + ( + partial(new_bracket_optimizer.priorband, base="asha"), + "new__priorband+asha", + ), + ( + partial(new_bracket_optimizer.priorband, base="async_hb"), + "new__priorband+async_hb", + ), + ( + new_bracket_optimizer.priorband, + "new__priorband+hyperband", + ), + ], +) +def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): + optimizer.__name__ = optimizer_name # Needed by NEPS later. + pipeline_space = DemoHyperparameterWithFidelitySpace() + root_directory = f"results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" + + print() + print(f"\nRunning for root directory: {root_directory}") + + # Reset the _COSTS global, so they do not get mixed up between tests. 
+ _COSTS.clear() + + neps.run( + evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( + evaluate_pipeline, + pipeline_space, + ), + pipeline_space=pipeline_space, + optimizer=optimizer, + root_directory=root_directory, + post_run_summary=True, + max_cost_total=1000, + overwrite_working_directory=True, + ) + neps.status(root_directory, print_summary=True) + + +@pytest.mark.parametrize( + ["optimizer", "optimizer_name"], + [ + ( + partial(old_algorithms.priorband, base="successive_halving"), + "old__priorband+successive_halving", + ), + ( + partial(old_algorithms.priorband, base="asha"), + "old__priorband+asha", + ), + ( + partial(old_algorithms.priorband, base="async_hb"), + "old__priorband+async_hb", + ), + ( + old_algorithms.priorband, + "old__priorband+hyperband", + ), + ], +) +def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): + optimizer.__name__ = optimizer_name # Needed by NEPS later. + pipeline_space = old_pipeline_space + root_directory = f"results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" + + print() + print(f"\nRunning for root directory: {root_directory}") + + # Reset the _COSTS global, so they do not get mixed up between tests. 
+ _COSTS.clear() + + neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=pipeline_space, + optimizer=optimizer, + root_directory=root_directory, + post_run_summary=True, + max_cost_total=1000, + overwrite_working_directory=True, + ) + neps.status(root_directory, print_summary=True) diff --git a/neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py b/neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py new file mode 100644 index 000000000..f4cd2fcb1 --- /dev/null +++ b/neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py @@ -0,0 +1,164 @@ +from functools import partial + +import numpy as np +import pytest + +import neps +import neps.space.new_space.space as space +import neps.space.new_space.bracket_optimizer as new_bracket_optimizer +import neps.optimizers.algorithms as old_algorithms + + +def evaluate_pipeline(float1, float2, integer1, fidelity): + objective_to_minimize = -float(np.sum([float1, float2, integer1])) * fidelity + # print(fidelity) + return objective_to_minimize + + +old_pipeline_space = dict( + float1=neps.Float( + lower=1, + upper=1000, + log=False, + prior=600, + prior_confidence="medium", + ), + float2=neps.Float( + lower=-100, + upper=100, + prior=0, + prior_confidence="medium", + ), + integer1=neps.Integer( + lower=0, + upper=500, + prior=35, + prior_confidence="low", + ), + fidelity=neps.Integer( + lower=1, + upper=100, + is_fidelity=True, + ), +) + + +class DemoHyperparameterWithFidelitySpace(space.Pipeline): + float1 = space.Float( + min_value=1, + max_value=1000, + log=False, + prior=600, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + float2 = space.Float( + min_value=-100, + max_value=100, + prior=0, + prior_confidence=space.ConfidenceLevel.MEDIUM, + ) + integer1 = space.Integer( + min_value=0, + max_value=500, + prior=35, + prior_confidence=space.ConfidenceLevel.LOW, + ) + fidelity = space.Fidelity( + domain=space.Integer( + min_value=1, + max_value=100, + ), 
+ ) + + +@pytest.mark.parametrize( + ["optimizer", "optimizer_name"], + [ + ( + space.RandomSearch, + "new__RandomSearch", + ), + ( + space.ComplexRandomSearch, + "new__ComplexRandomSearch", + ), + ( + partial(new_bracket_optimizer.priorband, base="successive_halving"), + "new__priorband+successive_halving", + ), + ( + partial(new_bracket_optimizer.priorband, base="asha"), + "new__priorband+asha", + ), + ( + partial(new_bracket_optimizer.priorband, base="async_hb"), + "new__priorband+async_hb", + ), + ( + new_bracket_optimizer.priorband, + "new__priorband+hyperband", + ), + ], +) +def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): + optimizer.__name__ = optimizer_name # Needed by NEPS later. + pipeline_space = DemoHyperparameterWithFidelitySpace() + root_directory = f"results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" + + print() + print(f"\nRunning for root directory: {root_directory}") + + neps.run( + evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( + evaluate_pipeline, + pipeline_space, + ), + pipeline_space=pipeline_space, + optimizer=optimizer, + root_directory=root_directory, + post_run_summary=True, + max_evaluations_total=200, + overwrite_working_directory=True, + ) + neps.status(root_directory, print_summary=True) + + +@pytest.mark.parametrize( + ["optimizer", "optimizer_name"], + [ + ( + partial(old_algorithms.priorband, base="successive_halving"), + "old__priorband+successive_halving", + ), + ( + partial(old_algorithms.priorband, base="asha"), + "old__priorband+asha", + ), + ( + partial(old_algorithms.priorband, base="async_hb"), + "old__priorband+async_hb", + ), + ( + old_algorithms.priorband, + "old__priorband+hyperband", + ), + ], +) +def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): + optimizer.__name__ = optimizer_name # Needed by NEPS later. 
+ pipeline_space = old_pipeline_space + root_directory = f"results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" + + print() + print(f"\nRunning for root directory: {root_directory}") + + neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=pipeline_space, + optimizer=optimizer, + root_directory=root_directory, + post_run_summary=True, + max_evaluations_total=200, + overwrite_working_directory=True, + ) + neps.status(root_directory, print_summary=True) From 6703974201c444b90e0376129572ebd89d71bcac Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Fri, 25 Apr 2025 18:43:32 +0200 Subject: [PATCH 004/156] WIP --- neps/space/new_space/priorband.py | 6 +++--- neps/space/new_space/space.py | 4 +++- .../tests/test_neps_integration_priorband__max_evals.py | 1 - 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/neps/space/new_space/priorband.py b/neps/space/new_space/priorband.py index 3d74a6258..747988265 100644 --- a/neps/space/new_space/priorband.py +++ b/neps/space/new_space/priorband.py @@ -90,7 +90,7 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: K = len(rung_table) // self.eta top_k_configs = rung_table.nsmallest(K, columns=["perf"])["config"].tolist() - # 2. Get the global incumbent, and build a prior distribution around it + # 2. Get the global incumbent inc_config = completed.loc[completed["perf"].idxmin()]["config"] # 3. Calculate a ratio score of how likely each of the top K configs are under @@ -109,8 +109,8 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: # TODO: [lum]: Here I am simply using fixed values. # Will maybe have to come up with a way to approximate the pdf for the top configs. - inc_ratio = float(0.75) - prior_ratio = float(0.25) + inc_ratio = 0.9 + prior_ratio = 0.1 # 4. 
And finally, we distribute the original w_prior according to this ratio w_inc = w_prior * inc_ratio diff --git a/neps/space/new_space/space.py b/neps/space/new_space/space.py index f98fe983c..55472b6e8 100644 --- a/neps/space/new_space/space.py +++ b/neps/space/new_space/space.py @@ -178,6 +178,8 @@ def _calculate_new_domain_bounds( chunk_size = (max_value - min_value) / number_of_chunks # The numbers refer to how many segments to have on each side of the center. + # TODO: [lum] we need to make sure that in the end the range does not just have the center, + # but at least a little bit more around it too. confidence_to_number_of_chunks_on_each_side = { ConfidenceLevel.HIGH: 1.0, ConfidenceLevel.MEDIUM: 2.5, @@ -645,7 +647,7 @@ def __call__( if current_path not in self._kept_samplings_to_make: # For this path we either have forgotten the value or we never had it. if current_path in self._original_samplings_to_make: - # If we had a value for this path originally, use it as a center. + # We had a value for this path originally, use it as a center. 
original_value = self._original_samplings_to_make[current_path] sampled_value = domain_obj.centered_around( center=original_value, diff --git a/neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py b/neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py index f4cd2fcb1..1b7bc06ab 100644 --- a/neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py +++ b/neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py @@ -11,7 +11,6 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): objective_to_minimize = -float(np.sum([float1, float2, integer1])) * fidelity - # print(fidelity) return objective_to_minimize From 9cbcc97de0e6b701400c9c5c8355807e52b3a763 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Sun, 11 May 2025 18:10:45 +0200 Subject: [PATCH 005/156] Allow having Operation args and kwargs themselves be Resolvable objects --- neps/space/new_space/space.py | 60 +++- .../tests/test_search_space__grammar_like.py | 309 ++++++++++++++++++ 2 files changed, 357 insertions(+), 12 deletions(-) create mode 100644 neps/space/new_space/tests/test_search_space__grammar_like.py diff --git a/neps/space/new_space/space.py b/neps/space/new_space/space.py index 55472b6e8..3d85a6745 100644 --- a/neps/space/new_space/space.py +++ b/neps/space/new_space/space.py @@ -205,11 +205,11 @@ def _calculate_new_domain_bounds( class Categorical(Domain[int], Generic[T]): def __init__( self, - choices: tuple[T | Domain[T] | Resolvable, ...] | Domain[T], + choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T], prior_index: int | Domain[int] | _Unset = _UNSET, prior_confidence: ConfidenceLevel | _Unset = _UNSET, ): - self._choices: tuple[T | Domain[T] | Resolvable, ...] | Domain[T] + self._choices: tuple[T | Domain[T] | Resolvable | Any, ...] 
| Domain[T] if isinstance(choices, Sequence): self._choices = tuple(choice for choice in choices) else: @@ -422,12 +422,22 @@ class Operation(Resolvable): def __init__( self, operator: Callable | str, - args: Sequence[Any] | None = None, - kwargs: Mapping[str, Any] | None = None, + args: Sequence[Any] | Resolvable | None = None, + kwargs: Mapping[str, Any] | Resolvable | None = None, ): self._operator = operator - self._args = tuple(args) if args else tuple() - self._kwargs = kwargs if kwargs else {} + + self._args: tuple[Any, ...] | Resolvable + if not isinstance(args, Resolvable): + self._args = tuple(args) if args else tuple() + else: + self._args = args + + self._kwargs: Mapping[str, Any] | Resolvable + if not isinstance(kwargs, Resolvable): + self._kwargs = kwargs if kwargs else {} + else: + self._kwargs = kwargs @property def operator(self) -> Callable | str: @@ -435,14 +445,14 @@ def operator(self) -> Callable | str: @property def args(self) -> tuple[Any, ...]: - return self._args + return cast(tuple[Any, ...], self._args) @property def kwargs(self) -> Mapping[str, Any]: - return self._kwargs + return cast(Mapping[str, Any], self._kwargs) def get_attrs(self) -> Mapping[str, Any]: - # TODO: simplify this. We know the fields. Maybe other places too. + # TODO: [lum] simplify this. We know the fields. Maybe other places too. result: dict[str, Any] = {} for name, value in vars(self).items(): name = name.lstrip("_") @@ -458,7 +468,7 @@ def get_attrs(self) -> Mapping[str, Any]: return result def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: - # TODO: simplify this. We know the fields. Maybe other places too. + # TODO: [lum] simplify this. We know the fields. Maybe other places too. 
final_attrs: dict[str, Any] = {} for name, value in attrs.items(): if "{" in name and "}" in name: @@ -1025,6 +1035,13 @@ def _( if context.was_already_resolved(operation_obj): return context.get_resolved(operation_obj) + # It is possible that the `operation_obj` will require two runs to be fully resolved. + # For example if `operation_obj.args` is not defined as a tuple of args, + # but is a Resolvable that needs to be resolved first itself, + # for us to have the actual tuple of args. + + # First run. + initial_attrs = operation_obj.get_attrs() final_attrs = {} needed_resolving = False @@ -1034,9 +1051,28 @@ def _( final_attrs[attr_name] = resolved_attr_value needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) - result = operation_obj + operation_obj_first_run = operation_obj + if needed_resolving: + operation_obj_first_run = operation_obj.from_attrs(final_attrs) + + # Second run. + # It is possible the first run was enough, + # in this case what we do below won't lead to any changes. 
+ + initial_attrs = operation_obj_first_run.get_attrs() + final_attrs = {} + needed_resolving = False + + for attr_name, initial_attr_value in initial_attrs.items(): + resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) + final_attrs[attr_name] = resolved_attr_value + needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) + + operation_obj_second_run = operation_obj_first_run if needed_resolving: - result = operation_obj.from_attrs(final_attrs) + operation_obj_second_run = operation_obj_first_run.from_attrs(final_attrs) + + result = operation_obj_second_run context.add_resolved(operation_obj, result) return result diff --git a/neps/space/new_space/tests/test_search_space__grammar_like.py b/neps/space/new_space/tests/test_search_space__grammar_like.py new file mode 100644 index 000000000..b231ac1be --- /dev/null +++ b/neps/space/new_space/tests/test_search_space__grammar_like.py @@ -0,0 +1,309 @@ +import pytest + +from neps.space.new_space import space +from neps.space.new_space import config_string + + +class GrammarLike(space.Pipeline): + _id = space.Operation(operator="Identity") + _three = space.Operation(operator="Conv2D-3") + _one = space.Operation(operator="Conv2D-1") + _reluconvbn = space.Operation(operator="ReLUConvBN") + + _O = space.Categorical(choices=(_three, _one, _id)) + + _C0 = space.Operation( + operator="Sequential", + args=(space.Resampled(_O),), + ) + _C1 = space.Operation( + operator="Sequential", + args=(space.Resampled(_O), space.Resampled("S"), _reluconvbn), + ) + _C2 = space.Operation( + operator="Sequential", + args=(space.Resampled(_O), space.Resampled("S")), + ) + _C3 = space.Operation( + operator="Sequential", + args=(space.Resampled("S"),), + ) + _C = space.Categorical( + choices=( + space.Resampled(_C0), + space.Resampled(_C1), + space.Resampled(_C2), + space.Resampled(_C3), + ), + ) + + _S0 = space.Operation( + operator="Sequential", + args=(space.Resampled(_C),), + ) + _S1 = 
space.Operation( + operator="Sequential", + args=(_reluconvbn,), + ) + _S2 = space.Operation( + operator="Sequential", + args=(space.Resampled("S"),), + ) + _S3 = space.Operation( + operator="Sequential", + args=(space.Resampled("S"), space.Resampled(_C)), + ) + _S4 = space.Operation( + operator="Sequential", + args=(space.Resampled(_O), space.Resampled(_O), space.Resampled(_O)), + ) + _S5 = space.Operation( + operator="Sequential", + args=( + space.Resampled("S"), + space.Resampled("S"), + space.Resampled(_O), + space.Resampled(_O), + space.Resampled(_O), + space.Resampled(_O), + space.Resampled(_O), + space.Resampled(_O), + ), + ) + S = space.Categorical( + choices=( + space.Resampled(_S0), + space.Resampled(_S1), + space.Resampled(_S2), + space.Resampled(_S3), + space.Resampled(_S4), + space.Resampled(_S5), + ), + ) + + +class GrammarLikeAlt(space.Pipeline): + _id = space.Operation(operator="Identity") + _three = space.Operation(operator="Conv2D-3") + _one = space.Operation(operator="Conv2D-1") + _reluconvbn = space.Operation(operator="ReLUConvBN") + + _O = space.Categorical(choices=(_three, _one, _id)) + + _C_ARGS = space.Categorical( + choices=( + (space.Resampled(_O),), + (space.Resampled(_O), space.Resampled("S"), _reluconvbn), + (space.Resampled(_O), space.Resampled("S")), + (space.Resampled("S"),), + ), + ) + _C = space.Operation( + operator="Sequential", + args=space.Resampled(_C_ARGS), + ) + + _S_ARGS = space.Categorical( + choices=( + (space.Resampled(_C),), + (_reluconvbn,), + (space.Resampled("S"),), + (space.Resampled("S"), space.Resampled(_C)), + (space.Resampled(_O), space.Resampled(_O), space.Resampled(_O)), + ( + space.Resampled("S"), + space.Resampled("S"), + space.Resampled(_O), + space.Resampled(_O), + space.Resampled(_O), + space.Resampled(_O), + space.Resampled(_O), + space.Resampled(_O), + ), + ), + ) + S = space.Operation( + operator="Sequential", + args=space.Resampled(_S_ARGS), + ) + + +@pytest.mark.repeat(500) +def test_resolve(): + 
pipeline = GrammarLike() + + try: + resolved_pipeline, resolution_context = space.resolve(pipeline) + except RecursionError: + pytest.xfail("XFAIL due to too much recursion.") + raise + + s = resolved_pipeline.S + s_config_string = space.convert_operation_to_string(s) + assert s_config_string + pretty_config = config_string.ConfigString(s_config_string).pretty_format() + assert pretty_config + + # print() + # print("Config string:") + # print(pretty_config) + # + # print() + # print("Samplings made:") + # import pprint + # pprint.pp(resolution_context.samplings_made, indent=2) + + +@pytest.mark.repeat(500) +def test_resolve_alt(): + pipeline = GrammarLikeAlt() + + try: + resolved_pipeline, resolution_context = space.resolve(pipeline) + except RecursionError: + pytest.xfail("XFAIL due to too much recursion.") + raise + + s = resolved_pipeline.S + s_config_string = space.convert_operation_to_string(s) + assert s_config_string + pretty_config = config_string.ConfigString(s_config_string).pretty_format() + assert pretty_config + + # print() + # print("Config string:") + # print(pretty_config) + # + # print() + # print("Samplings made:") + # import pprint + # pprint.pp(resolution_context.samplings_made, indent=2) + + +def test_resolve_context(): + samplings_to_make = { + "Resolvable.S::categorical__6": 5, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 3, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 1, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__4": 1, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": 0, + 
"Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": 5, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 0, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__4": 3, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 4, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": 2, + 
"Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__3": 0, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": 2, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": 1, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": 0, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[3].resampled_categorical::categorical__3": 2, + 
"Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[4].resampled_categorical::categorical__3": 1, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[5].resampled_categorical::categorical__3": 0, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[6].resampled_categorical::categorical__3": 1, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[7].resampled_categorical::categorical__3": 2, + "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": 2, + "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 2, + "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 0, + "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__4": 2, + 
"Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": 2, + "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": 1, + "Resolvable.S.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": 1, + "Resolvable.S.sampled_value.resampled_operation.args[3].resampled_categorical::categorical__3": 1, + "Resolvable.S.sampled_value.resampled_operation.args[4].resampled_categorical::categorical__3": 2, + "Resolvable.S.sampled_value.resampled_operation.args[5].resampled_categorical::categorical__3": 2, + "Resolvable.S.sampled_value.resampled_operation.args[6].resampled_categorical::categorical__3": 1, + "Resolvable.S.sampled_value.resampled_operation.args[7].resampled_categorical::categorical__3": 1, + } + expected_s_config_string = "(Sequential (Sequential (Sequential (ReLUConvBN)) (Sequential (Conv2D-3) (Sequential (Sequential (Sequential (Sequential (Identity) (Conv2D-3) (Identity)))) (Sequential (ReLUConvBN)) (Conv2D-3) (Identity) (Conv2D-1) (Conv2D-3) (Conv2D-1) (Identity)) (ReLUConvBN))) (Sequential (Sequential (Sequential (Sequential (Identity) (Sequential (ReLUConvBN)))))) (Conv2D-1) (Conv2D-1) (Identity) (Identity) (Conv2D-1) (Conv2D-1))" + + pipeline = GrammarLike() + + resolved_pipeline, resolution_context = space.resolve( + pipeline, + domain_sampler=space.OnlyPredefinedValuesSampler( + predefined_samplings=samplings_to_make, + ), + ) + sampled_values = 
resolution_context.samplings_made + + assert resolved_pipeline is not None + assert sampled_values is not None + assert sampled_values is not samplings_to_make + assert sampled_values == samplings_to_make + assert list(sampled_values.items()) == list(samplings_to_make.items()) + + # we should have made exactly those samplings + assert sampled_values == samplings_to_make + + s = resolved_pipeline.S + s_config_string = space.convert_operation_to_string(s) + assert s_config_string + assert s_config_string == expected_s_config_string + + # print() + # print("Config string:") + # pretty_config = config_string.ConfigString(s_config_string).pretty_format() + # print(pretty_config) + # + # print() + # print("Samplings made:") + # import pprint + # pprint.pp(resolution_context.samplings_made, indent=2) + + +def test_resolve_context_alt(): + samplings_to_make = { + "Resolvable.S.args.resampled_categorical::categorical__6": 3, + "Resolvable.S.args[0].resampled_operation.args.resampled_categorical::categorical__6": 0, + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4": 1, + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": 2, + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6": 3, + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": 1, + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4": 0, + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3": 0, + 
"Resolvable.S.args[1].resampled_operation.args.resampled_categorical::categorical__4": 3, + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": 3, + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": 0, + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4": 0, + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": 0, + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4": 3, + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": 4, + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": 1, + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3": 2, + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": 0, + } + expected_s_config_string = "(Sequential (Sequential (Sequential (Identity) (Sequential (Sequential (ReLUConvBN)) (Sequential (Conv2D-3))) (ReLUConvBN))) (Sequential (Sequential (Sequential (Sequential (Conv2D-3))) (Sequential (Sequential (Conv2D-1) (Identity) (Conv2D-3))))))" + + pipeline = GrammarLikeAlt() + + resolved_pipeline, resolution_context = space.resolve( + pipeline, + 
domain_sampler=space.OnlyPredefinedValuesSampler( + predefined_samplings=samplings_to_make, + ), + ) + sampled_values = resolution_context.samplings_made + + assert resolved_pipeline is not None + assert sampled_values is not None + assert sampled_values is not samplings_to_make + assert sampled_values == samplings_to_make + assert list(sampled_values.items()) == list(samplings_to_make.items()) + + # we should have made exactly those samplings + assert sampled_values == samplings_to_make + + s = resolved_pipeline.S + s_config_string = space.convert_operation_to_string(s) + assert s_config_string + assert s_config_string == expected_s_config_string + + # print() + # print("Config string:") + # pretty_config = config_string.ConfigString(s_config_string).pretty_format() + # print(pretty_config) + # + # print() + # print("Samplings made:") + # import pprint + # pprint.pp(resolution_context.samplings_made, indent=2) From ed417fe930a1f4d9986db1f3853086e0a7ba4a03 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Fri, 23 May 2025 20:40:08 +0200 Subject: [PATCH 006/156] Add NOS like space --- .../tests/test_search_space__nos_like.py | 136 ++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 neps/space/new_space/tests/test_search_space__nos_like.py diff --git a/neps/space/new_space/tests/test_search_space__nos_like.py b/neps/space/new_space/tests/test_search_space__nos_like.py new file mode 100644 index 000000000..365513639 --- /dev/null +++ b/neps/space/new_space/tests/test_search_space__nos_like.py @@ -0,0 +1,136 @@ +# import nosbench +# from nosbench.program import Program, Instruction, Pointer +# from nosbench.function import Function + +import pytest + +from neps.space.new_space import space +from neps.space.new_space import config_string + + +class nosBench(space.Pipeline): + _UNARY_FUN = space.Categorical( + choices=( + space.Operation(operator="Square"), + space.Operation(operator="Exp"), + 
space.Operation(operator="Log"), + ) + ) + + _BINARY_FUN = space.Categorical( + choices=( + space.Operation(operator="Add"), + space.Operation(operator="Sub"), + space.Operation(operator="Mul"), + ) + ) + + _TERNARY_FUN = space.Categorical( + choices=( + space.Operation(operator="Interpolate"), + space.Operation(operator="Bias_Correct"), + ) + ) + + _PARAMS = space.Categorical( + choices=( + space.Operation(operator="Params"), + space.Operation(operator="Gradient"), + space.Operation(operator="Opt_Step"), + ) + ) + _CONST = space.Integer(3, 8) + _VAR = space.Integer(9, 19) + + _POINTER = space.Categorical( + choices=( + space.Resampled(_PARAMS), + space.Resampled(_CONST), + space.Resampled(_VAR), + ), + ) + + _UNARY = space.Operation( + operator="Unary", + args=( + space.Resampled(_UNARY_FUN), + space.Resampled(_POINTER), + ), + ) + + _BINARY = space.Operation( + operator="Binary", + args=( + space.Resampled(_BINARY_FUN), + space.Resampled(_POINTER), + space.Resampled(_POINTER), + ), + ) + + _TERNARY = space.Operation( + operator="Ternary", + args=( + space.Resampled(_TERNARY_FUN), + space.Resampled(_POINTER), + space.Resampled(_POINTER), + space.Resampled(_POINTER), + ), + ) + + _F_ARGS = space.Categorical( + choices=( + space.Resampled(_UNARY), + space.Resampled(_BINARY), + space.Resampled(_TERNARY), + ), + ) + + _F = space.Operation( + operator="Function", + args=(space.Resampled(_F_ARGS),), + kwargs={"var": space.Resampled(_VAR)}, + ) + + _L_ARGS = space.Categorical( + choices=( + (space.Resampled(_F),), + (space.Resampled(_F), space.Resampled("_L")), + ), + ) + + _L = space.Operation( + operator="Line_operator", + args=space.Resampled(_L_ARGS), + ) + + P = space.Operation( + operator="Program", + args=(space.Resampled(_L),), + ) + + +@pytest.mark.repeat(500) +def test_resolve(): + pipeline = nosBench() + + try: + resolved_pipeline, resolution_context = space.resolve(pipeline) + except RecursionError: + pytest.xfail("XFAIL due to too much recursion.") + raise + 
+ p = resolved_pipeline.P + p_config_string = space.convert_operation_to_string(p) + assert p_config_string + pretty_config = config_string.ConfigString(p_config_string).pretty_format() + assert pretty_config + + print() + print("Config string:") + print(pretty_config) + + # print() + # print("Samplings made:") + # import pprint + # + # pprint.pp(resolution_context.samplings_made, indent=2) From 0ad5ac1400a071e0e45f30db0e35f02a9b7f7bc3 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Fri, 23 May 2025 21:00:08 +0200 Subject: [PATCH 007/156] Minor change in config pretty print --- neps/space/new_space/config_string.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/neps/space/new_space/config_string.py b/neps/space/new_space/config_string.py index 06805badf..6399fbbf7 100644 --- a/neps/space/new_space/config_string.py +++ b/neps/space/new_space/config_string.py @@ -216,10 +216,15 @@ def at_hierarchy_level(self, level: int) -> ConfigString: return self._at_hierarchy_level_cache[level] def pretty_format(self) -> str: - format_str = "{indent}{item.level:0>2d} :: {item.operator} {item.hyperparameters}" + format_str_with_kwargs = "{indent}{item.level:0>2d} :: {item.operator} {item.hyperparameters}" + format_str_no_kwargs = "{indent}{item.level:0>2d} :: {item.operator}" lines = [self.config_string] for item in self.unwrapped: - lines.append(format_str.format(item=item, indent="\t" * item.level)) + if item.hyperparameters not in {"{}", ""}: + line = format_str_with_kwargs.format(item=item, indent="\t" * item.level) + else: + line = format_str_no_kwargs.format(item=item, indent="\t" * item.level) + lines.append(line) return "\n".join(lines) def __eq__(self, other: object) -> bool: From 8a68a2c0b0a475fff9a214c01cd3752e3224c5e9 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 1 Jul 2025 23:26:02 +0200 Subject: [PATCH 008/156] Add comprehensive tests for NEPS space functionality - Introduced tests for 
various aspects of the NEPS space, including: - `test_search_space__nos_like.py`: Tests for the resolution of a complex pipeline with unary, binary, and ternary operations. - `test_search_space__recursion.py`: Validates recursive operations within the pipeline, ensuring correct factor propagation. - `test_search_space__resampled.py`: Tests for resampling behavior in both float and integer pipelines, ensuring shared and unique values are handled correctly. - `test_search_space__reuse_arch_elements.py`: Verifies the reuse of architectural elements in pipelines, ensuring correct operation and configuration string generation. - `utils.py`: Added utility functions to generate possible configuration strings for pipelines, aiding in testing and validation. --- pyproject.toml | 4 + tests/test_neps_space/__init__.py | 0 .../test_domain__centering.py | 56 ++-- .../test_neps_space}/test_neps_integration.py | 39 +-- ...st_neps_integration_priorband__max_cost.py | 29 +- ...t_neps_integration_priorband__max_evals.py | 31 +-- .../test_search_space__fidelity.py | 4 +- .../test_search_space__grammar_like.py | 247 +++++++++++------- .../test_search_space__hnas_like.py | 140 +++++++--- .../test_search_space__nos_like.py | 21 +- .../test_search_space__recursion.py | 11 +- .../test_search_space__resampled.py | 26 +- .../test_search_space__reuse_arch_elements.py | 36 ++- .../tests => tests/test_neps_space}/utils.py | 9 +- 14 files changed, 416 insertions(+), 237 deletions(-) create mode 100644 tests/test_neps_space/__init__.py rename {neps/space/new_space/tests => tests/test_neps_space}/test_domain__centering.py (82%) rename {neps/space/new_space/tests => tests/test_neps_space}/test_neps_integration.py (90%) rename {neps/space/new_space/tests => tests/test_neps_space}/test_neps_integration_priorband__max_cost.py (91%) rename {neps/space/new_space/tests => tests/test_neps_space}/test_neps_integration_priorband__max_evals.py (88%) rename {neps/space/new_space/tests => 
tests/test_neps_space}/test_search_space__fidelity.py (97%) rename {neps/space/new_space/tests => tests/test_neps_space}/test_search_space__grammar_like.py (77%) rename {neps/space/new_space/tests => tests/test_neps_space}/test_search_space__hnas_like.py (60%) rename {neps/space/new_space/tests => tests/test_neps_space}/test_search_space__nos_like.py (86%) rename {neps/space/new_space/tests => tests/test_neps_space}/test_search_space__recursion.py (88%) rename {neps/space/new_space/tests => tests/test_neps_space}/test_search_space__resampled.py (93%) rename {neps/space/new_space/tests => tests/test_neps_space}/test_search_space__reuse_arch_elements.py (93%) rename {neps/space/new_space/tests => tests/test_neps_space}/utils.py (81%) diff --git a/pyproject.toml b/pyproject.toml index 48954905a..78acfde3f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -239,6 +239,10 @@ ignore = [ "PT011", # Catch value error to broad "ARG001", # unused param ] +"tests/test_neps_space/*.py" = [ + "E501", # Line length for architecture strings +] + "__init__.py" = ["I002"] "neps_examples/*" = [ "INP001", diff --git a/tests/test_neps_space/__init__.py b/tests/test_neps_space/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neps/space/new_space/tests/test_domain__centering.py b/tests/test_neps_space/test_domain__centering.py similarity index 82% rename from neps/space/new_space/tests/test_domain__centering.py rename to tests/test_neps_space/test_domain__centering.py index b04769233..5ea6676a3 100644 --- a/neps/space/new_space/tests/test_domain__centering.py +++ b/tests/test_neps_space/test_domain__centering.py @@ -1,10 +1,12 @@ +from __future__ import annotations + import pytest -import neps.space.new_space.space as space +from neps.space.new_space import space @pytest.mark.parametrize( - ["confidence_level", "expected_prior_min_max"], + ("confidence_level", "expected_prior_min_max"), [ (space.ConfidenceLevel.LOW, (50, 10, 90)), (space.ConfidenceLevel.MEDIUM, 
(50, 25, 75)), @@ -55,7 +57,7 @@ def test_centering_integer( @pytest.mark.parametrize( - ["confidence_level", "expected_prior_min_max"], + ("confidence_level", "expected_prior_min_max"), [ (space.ConfidenceLevel.LOW, (50.0, 10.399999999999999, 89.6)), (space.ConfidenceLevel.MEDIUM, (50.0, 25.25, 74.75)), @@ -106,7 +108,7 @@ def test_centering_float( @pytest.mark.parametrize( - ["confidence_level", "expected_prior_min_max_value"], + ("confidence_level", "expected_prior_min_max_value"), [ (space.ConfidenceLevel.LOW, (40, 0, 80, 50)), (space.ConfidenceLevel.MEDIUM, (25, 0, 50, 50)), @@ -132,8 +134,12 @@ def test_centering_categorical( prior_confidence=confidence_level, ) - categorical1_centered = categorical1.centered_around(categorical_prior_index_original, confidence_level) - categorical2_centered = categorical2.centered_around(categorical2.prior, categorical2.prior_confidence) + categorical1_centered = categorical1.centered_around( + categorical_prior_index_original, confidence_level + ) + categorical2_centered = categorical2.centered_around( + categorical2.prior, categorical2.prior_confidence + ) # During the centering of categorical objects, the prior index will change. 
assert categorical_prior_index_original != expected_prior_min_max_value[0] @@ -159,7 +165,7 @@ def test_centering_categorical( @pytest.mark.parametrize( - ["confidence_level", "expected_prior_min_max"], + ("confidence_level", "expected_prior_min_max"), [ (space.ConfidenceLevel.LOW, (10, 5, 13)), (space.ConfidenceLevel.MEDIUM, (10, 7, 13)), @@ -184,15 +190,23 @@ def test_centering_stranger_ranges_integer( ) int2_centered = int2.centered_around(int2.prior, int2.prior_confidence) - assert (int1_centered.prior, int1_centered.min_value, int1_centered.max_value) == expected_prior_min_max - assert (int2_centered.prior, int2_centered.min_value, int2_centered.max_value) == expected_prior_min_max + assert ( + int1_centered.prior, + int1_centered.min_value, + int1_centered.max_value, + ) == expected_prior_min_max + assert ( + int2_centered.prior, + int2_centered.min_value, + int2_centered.max_value, + ) == expected_prior_min_max int1_centered.sample() int2_centered.sample() @pytest.mark.parametrize( - ["confidence_level", "expected_prior_min_max"], + ("confidence_level", "expected_prior_min_max"), [ (space.ConfidenceLevel.LOW, (0.5, 0.09999999999999998, 0.9)), (space.ConfidenceLevel.MEDIUM, (0.5, 0.25, 0.75)), @@ -217,15 +231,23 @@ def test_centering_stranger_ranges_float( ) float2_centered = float2.centered_around(float2.prior, float2.prior_confidence) - assert (float1_centered.prior, float1_centered.min_value, float1_centered.max_value) == expected_prior_min_max - assert (float2_centered.prior, float2_centered.min_value, float2_centered.max_value) == expected_prior_min_max + assert ( + float1_centered.prior, + float1_centered.min_value, + float1_centered.max_value, + ) == expected_prior_min_max + assert ( + float2_centered.prior, + float2_centered.min_value, + float2_centered.max_value, + ) == expected_prior_min_max float1_centered.sample() float2_centered.sample() @pytest.mark.parametrize( - ["confidence_level", "expected_prior_min_max_value"], + ("confidence_level", 
"expected_prior_min_max_value"), [ (space.ConfidenceLevel.LOW, (2, 0, 5, 2)), (space.ConfidenceLevel.MEDIUM, (2, 0, 4, 2)), @@ -237,16 +259,18 @@ def test_centering_stranger_ranges_categorical( expected_prior_min_max_value, ): categorical1 = space.Categorical( - choices=tuple(range(0, 7)), + choices=tuple(range(7)), ) categorical1_centered = categorical1.centered_around(2, confidence_level) categorical2 = space.Categorical( - choices=tuple(range(0, 7)), + choices=tuple(range(7)), prior_index=2, prior_confidence=confidence_level, ) - categorical2_centered = categorical2.centered_around(categorical2.prior, categorical2.prior_confidence) + categorical2_centered = categorical2.centered_around( + categorical2.prior, categorical2.prior_confidence + ) assert ( categorical1_centered.prior, diff --git a/neps/space/new_space/tests/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py similarity index 90% rename from neps/space/new_space/tests/test_neps_integration.py rename to tests/test_neps_space/test_neps_integration.py index ee018c563..0907ce1da 100644 --- a/neps/space/new_space/tests/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -1,9 +1,11 @@ -from typing import Sequence, Callable +from __future__ import annotations + +from collections.abc import Callable, Sequence import pytest import neps -import neps.space.new_space.space as space +from neps.space.new_space import space def hyperparameter_pipeline_to_optimize( @@ -13,9 +15,6 @@ def hyperparameter_pipeline_to_optimize( integer1: int, integer2: int, ): - # print() - # print(f"Evaluating trial: {float1}, {float2}, {categorical}, {integer1}, {integer2}") - assert isinstance(float1, float) assert isinstance(float2, float) assert isinstance(categorical, int) @@ -25,7 +24,6 @@ def hyperparameter_pipeline_to_optimize( objective_to_minimize = -float(float1 + float2 + categorical + integer1 + integer2) assert isinstance(objective_to_minimize, float) - # print(f"Score: 
{objective_to_minimize}") return objective_to_minimize @@ -163,8 +161,6 @@ def test_hyperparameter_demo(optimizer): max_evaluations_total=10, overwrite_working_directory=True, ) - print() - print(f"\nRoot directory: {root_directory}") neps.status(root_directory, print_summary=True) @@ -188,8 +184,6 @@ def test_hyperparameter_with_fidelity_demo(optimizer): max_evaluations_total=10, overwrite_working_directory=True, ) - print() - print(f"\nRoot directory: {root_directory}") neps.status(root_directory, print_summary=True) @@ -213,8 +207,6 @@ def test_hyperparameter_complex_demo(optimizer): max_evaluations_total=10, overwrite_working_directory=True, ) - print() - print(f"\nRoot directory: {root_directory}") neps.status(root_directory, print_summary=True) @@ -222,11 +214,16 @@ def test_hyperparameter_complex_demo(optimizer): class Model: + """A simple model that takes an inner function and a factor, + multiplies the result of the inner function by the factor. + """ + def __init__( self, inner_function: Callable[[Sequence[float]], float], factor: float, ): + """Initialize the model with an inner function and a factor.""" self.inner_function = inner_function self.factor = factor @@ -235,12 +232,17 @@ def __call__(self, values: Sequence[float]) -> float: class Sum: + """A simple inner function that sums the values.""" + def __call__(self, values: Sequence[float]) -> float: return sum(values) class MultipliedSum: + """An inner function that sums the values and multiplies the result by a factor.""" + def __init__(self, factor: float): + """Initialize the multiplied sum with a factor.""" self.factor = factor def __call__(self, values: Sequence[float]) -> float: @@ -248,12 +250,9 @@ def __call__(self, values: Sequence[float]) -> float: def operation_pipeline_to_optimize(model: Model, some_hp: str): - # print() - # print(f"Evaluating trial: {model}") - assert isinstance(model, Model) assert isinstance(model.factor, float) - assert isinstance(model.inner_function, (Sum, 
MultipliedSum)) + assert isinstance(model.inner_function, Sum | MultipliedSum) if isinstance(model.inner_function, MultipliedSum): assert isinstance(model.inner_function.factor, float) assert some_hp in {"hp1", "hp2"} @@ -262,11 +261,15 @@ def operation_pipeline_to_optimize(model: Model, some_hp: str): objective_to_minimize = model(values) assert isinstance(objective_to_minimize, float) - # print(f"Score: {objective_to_minimize}") return objective_to_minimize class DemoOperationSpace(space.Pipeline): + """A demonstration of how to use operations in a search space. + This space defines a model that can be optimized using different inner functions + and a factor. The model can be used to evaluate a set of values and return an objective to minimize. + """ + # The way to sample `factor` values _factor = space.Float( min_value=0, @@ -329,6 +332,4 @@ def test_operation_demo(optimizer): max_evaluations_total=10, overwrite_working_directory=True, ) - print() - print(f"\nRoot directory: {root_directory}") neps.status(root_directory, print_summary=True) diff --git a/neps/space/new_space/tests/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py similarity index 91% rename from neps/space/new_space/tests/test_neps_integration_priorband__max_cost.py rename to tests/test_neps_space/test_neps_integration_priorband__max_cost.py index 9053b5bab..95e91e75c 100644 --- a/neps/space/new_space/tests/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -1,13 +1,14 @@ +from __future__ import annotations + from functools import partial import numpy as np import pytest import neps -import neps.space.new_space.space as space -import neps.space.new_space.bracket_optimizer as new_bracket_optimizer import neps.optimizers.algorithms as old_algorithms - +import neps.space.new_space.bracket_optimizer as new_bracket_optimizer +from neps.space.new_space import space 
_COSTS = {} @@ -27,32 +28,32 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): } -old_pipeline_space = dict( - float1=neps.Float( +old_pipeline_space = { + "float1": neps.Float( lower=1, upper=1000, log=False, prior=600, prior_confidence="medium", ), - float2=neps.Float( + "float2": neps.Float( lower=-100, upper=100, prior=0, prior_confidence="medium", ), - integer1=neps.Integer( + "integer1": neps.Integer( lower=0, upper=500, prior=35, prior_confidence="low", ), - fidelity=neps.Integer( + "fidelity": neps.Integer( lower=1, upper=100, is_fidelity=True, ), -) +} class DemoHyperparameterWithFidelitySpace(space.Pipeline): @@ -84,7 +85,7 @@ class DemoHyperparameterWithFidelitySpace(space.Pipeline): @pytest.mark.parametrize( - ["optimizer", "optimizer_name"], + ("optimizer", "optimizer_name"), [ ( space.RandomSearch, @@ -117,9 +118,6 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): pipeline_space = DemoHyperparameterWithFidelitySpace() root_directory = f"results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" - print() - print(f"\nRunning for root directory: {root_directory}") - # Reset the _COSTS global, so they do not get mixed up between tests. _COSTS.clear() @@ -139,7 +137,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): @pytest.mark.parametrize( - ["optimizer", "optimizer_name"], + ("optimizer", "optimizer_name"), [ ( partial(old_algorithms.priorband, base="successive_halving"), @@ -164,9 +162,6 @@ def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): pipeline_space = old_pipeline_space root_directory = f"results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" - print() - print(f"\nRunning for root directory: {root_directory}") - # Reset the _COSTS global, so they do not get mixed up between tests. 
_COSTS.clear() diff --git a/neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py similarity index 88% rename from neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py rename to tests/test_neps_space/test_neps_integration_priorband__max_evals.py index 1b7bc06ab..1da454974 100644 --- a/neps/space/new_space/tests/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -1,45 +1,46 @@ +from __future__ import annotations + from functools import partial import numpy as np import pytest import neps -import neps.space.new_space.space as space -import neps.space.new_space.bracket_optimizer as new_bracket_optimizer import neps.optimizers.algorithms as old_algorithms +import neps.space.new_space.bracket_optimizer as new_bracket_optimizer +from neps.space.new_space import space def evaluate_pipeline(float1, float2, integer1, fidelity): - objective_to_minimize = -float(np.sum([float1, float2, integer1])) * fidelity - return objective_to_minimize + return -float(np.sum([float1, float2, integer1])) * fidelity -old_pipeline_space = dict( - float1=neps.Float( +old_pipeline_space = { + "float1": neps.Float( lower=1, upper=1000, log=False, prior=600, prior_confidence="medium", ), - float2=neps.Float( + "float2": neps.Float( lower=-100, upper=100, prior=0, prior_confidence="medium", ), - integer1=neps.Integer( + "integer1": neps.Integer( lower=0, upper=500, prior=35, prior_confidence="low", ), - fidelity=neps.Integer( + "fidelity": neps.Integer( lower=1, upper=100, is_fidelity=True, ), -) +} class DemoHyperparameterWithFidelitySpace(space.Pipeline): @@ -71,7 +72,7 @@ class DemoHyperparameterWithFidelitySpace(space.Pipeline): @pytest.mark.parametrize( - ["optimizer", "optimizer_name"], + ("optimizer", "optimizer_name"), [ ( space.RandomSearch, @@ -104,9 +105,6 @@ def 
test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): pipeline_space = DemoHyperparameterWithFidelitySpace() root_directory = f"results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" - print() - print(f"\nRunning for root directory: {root_directory}") - neps.run( evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( evaluate_pipeline, @@ -123,7 +121,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): @pytest.mark.parametrize( - ["optimizer", "optimizer_name"], + ("optimizer", "optimizer_name"), [ ( partial(old_algorithms.priorband, base="successive_halving"), @@ -148,9 +146,6 @@ def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): pipeline_space = old_pipeline_space root_directory = f"results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" - print() - print(f"\nRunning for root directory: {root_directory}") - neps.run( evaluate_pipeline=evaluate_pipeline, pipeline_space=pipeline_space, diff --git a/neps/space/new_space/tests/test_search_space__fidelity.py b/tests/test_neps_space/test_search_space__fidelity.py similarity index 97% rename from neps/space/new_space/tests/test_search_space__fidelity.py rename to tests/test_neps_space/test_search_space__fidelity.py index fb3a265a1..0ed29a9c3 100644 --- a/neps/space/new_space/tests/test_search_space__fidelity.py +++ b/tests/test_neps_space/test_search_space__fidelity.py @@ -1,8 +1,10 @@ +from __future__ import annotations + import re import pytest -import neps.space.new_space.space as space +from neps.space.new_space import space class DemoHyperparametersWithFidelitySpace(space.Pipeline): diff --git a/neps/space/new_space/tests/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py similarity index 77% rename from neps/space/new_space/tests/test_search_space__grammar_like.py rename to tests/test_neps_space/test_search_space__grammar_like.py index b231ac1be..78fb4ee2f 100644 --- 
a/neps/space/new_space/tests/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -1,7 +1,8 @@ +from __future__ import annotations + import pytest -from neps.space.new_space import space -from neps.space.new_space import config_string +from neps.space.new_space import config_string, space class GrammarLike(space.Pipeline): @@ -144,15 +145,6 @@ def test_resolve(): pretty_config = config_string.ConfigString(s_config_string).pretty_format() assert pretty_config - # print() - # print("Config string:") - # print(pretty_config) - # - # print() - # print("Samplings made:") - # import pprint - # pprint.pp(resolution_context.samplings_made, indent=2) - @pytest.mark.repeat(500) def test_resolve_alt(): @@ -170,51 +162,109 @@ def test_resolve_alt(): pretty_config = config_string.ConfigString(s_config_string).pretty_format() assert pretty_config - # print() - # print("Config string:") - # print(pretty_config) - # - # print() - # print("Samplings made:") - # import pprint - # pprint.pp(resolution_context.samplings_made, indent=2) - def test_resolve_context(): samplings_to_make = { "Resolvable.S::categorical__6": 5, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 3, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 1, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__4": 1, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": 0, - 
"Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": 5, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 0, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__4": 3, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 4, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": 2, - 
"Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__3": 0, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": 2, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": 1, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": 0, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[3].resampled_categorical::categorical__3": 2, - 
"Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[4].resampled_categorical::categorical__3": 1, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[5].resampled_categorical::categorical__3": 0, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[6].resampled_categorical::categorical__3": 1, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[7].resampled_categorical::categorical__3": 2, - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": 2, - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 2, - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": 0, - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__4": 2, - 
"Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": 2, - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": 1, - "Resolvable.S.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": 1, - "Resolvable.S.sampled_value.resampled_operation.args[3].resampled_categorical::categorical__3": 1, - "Resolvable.S.sampled_value.resampled_operation.args[4].resampled_categorical::categorical__3": 2, - "Resolvable.S.sampled_value.resampled_operation.args[5].resampled_categorical::categorical__3": 2, - "Resolvable.S.sampled_value.resampled_operation.args[6].resampled_categorical::categorical__3": 1, - "Resolvable.S.sampled_value.resampled_operation.args[7].resampled_categorical::categorical__3": 1, + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + 3 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + 1 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__4": ( + 1 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": 
( + 0 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": ( + 5 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + 0 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__4": ( + 3 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + 4 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": ( + 2 + ), + 
"Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__3": ( + 0 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": ( + 1 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": ( + 0 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[3].resampled_categorical::categorical__3": ( + 2 + ), + 
"Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[4].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[5].resampled_categorical::categorical__3": ( + 0 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[6].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[7].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": ( + 2 + ), + "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + 2 + ), + "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + 0 + ), + 
"Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__4": ( + 2 + ), + "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": ( + 1 + ), + "Resolvable.S.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.sampled_value.resampled_operation.args[3].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.sampled_value.resampled_operation.args[4].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.sampled_value.resampled_operation.args[5].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.sampled_value.resampled_operation.args[6].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.sampled_value.resampled_operation.args[7].resampled_categorical::categorical__3": ( + 1 + ), } - expected_s_config_string = "(Sequential (Sequential (Sequential (ReLUConvBN)) (Sequential (Conv2D-3) (Sequential (Sequential (Sequential (Sequential (Identity) (Conv2D-3) (Identity)))) (Sequential (ReLUConvBN)) (Conv2D-3) (Identity) (Conv2D-1) (Conv2D-3) (Conv2D-1) (Identity)) (ReLUConvBN))) (Sequential 
(Sequential (Sequential (Sequential (Identity) (Sequential (ReLUConvBN)))))) (Conv2D-1) (Conv2D-1) (Identity) (Identity) (Conv2D-1) (Conv2D-1))" + expected_s_config_string = ( + "(Sequential (Sequential (Sequential (ReLUConvBN)) (Sequential (Conv2D-3)" + " (Sequential (Sequential (Sequential (Sequential (Identity) (Conv2D-3)" + " (Identity)))) (Sequential (ReLUConvBN)) (Conv2D-3) (Identity) (Conv2D-1)" + " (Conv2D-3) (Conv2D-1) (Identity)) (ReLUConvBN))) (Sequential (Sequential" + " (Sequential (Sequential (Identity) (Sequential (ReLUConvBN)))))) (Conv2D-1)" + " (Conv2D-1) (Identity) (Identity) (Conv2D-1) (Conv2D-1))" + ) pipeline = GrammarLike() @@ -240,39 +290,68 @@ def test_resolve_context(): assert s_config_string assert s_config_string == expected_s_config_string - # print() - # print("Config string:") - # pretty_config = config_string.ConfigString(s_config_string).pretty_format() - # print(pretty_config) - # - # print() - # print("Samplings made:") - # import pprint - # pprint.pp(resolution_context.samplings_made, indent=2) - def test_resolve_context_alt(): samplings_to_make = { "Resolvable.S.args.resampled_categorical::categorical__6": 3, - "Resolvable.S.args[0].resampled_operation.args.resampled_categorical::categorical__6": 0, - "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4": 1, - "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": 2, - "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6": 3, - "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": 1, - "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4": 0, 
- "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3": 0, - "Resolvable.S.args[1].resampled_operation.args.resampled_categorical::categorical__4": 3, - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": 3, - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": 0, - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4": 0, - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": 0, - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4": 3, - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": 4, - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": 1, - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3": 2, - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": 0, + "Resolvable.S.args[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 0 + ), + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4": ( + 1 + ), + 
"Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6": ( + 3 + ), + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 1 + ), + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4": ( + 0 + ), + "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3": ( + 0 + ), + "Resolvable.S.args[1].resampled_operation.args.resampled_categorical::categorical__4": ( + 3 + ), + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 3 + ), + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 0 + ), + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4": ( + 0 + ), + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": ( + 0 + ), + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4": ( + 3 + ), + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 4 + ), + 
"Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": ( + 0 + ), } - expected_s_config_string = "(Sequential (Sequential (Sequential (Identity) (Sequential (Sequential (ReLUConvBN)) (Sequential (Conv2D-3))) (ReLUConvBN))) (Sequential (Sequential (Sequential (Sequential (Conv2D-3))) (Sequential (Sequential (Conv2D-1) (Identity) (Conv2D-3))))))" + expected_s_config_string = ( + "(Sequential (Sequential (Sequential (Identity) (Sequential (Sequential" + " (ReLUConvBN)) (Sequential (Conv2D-3))) (ReLUConvBN))) (Sequential (Sequential" + " (Sequential (Sequential (Conv2D-3))) (Sequential (Sequential (Conv2D-1)" + " (Identity) (Conv2D-3))))))" + ) pipeline = GrammarLikeAlt() @@ -297,13 +376,3 @@ def test_resolve_context_alt(): s_config_string = space.convert_operation_to_string(s) assert s_config_string assert s_config_string == expected_s_config_string - - # print() - # print("Config string:") - # pretty_config = config_string.ConfigString(s_config_string).pretty_format() - # print(pretty_config) - # - # print() - # print("Samplings made:") - # import pprint - # pprint.pp(resolution_context.samplings_made, indent=2) diff --git a/neps/space/new_space/tests/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py similarity index 60% rename from neps/space/new_space/tests/test_search_space__hnas_like.py rename to tests/test_neps_space/test_search_space__hnas_like.py index a3728de55..42dd5518d 100644 --- 
a/neps/space/new_space/tests/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -1,12 +1,12 @@ +from __future__ import annotations + import pytest -from neps.space.new_space import space -from neps.space.new_space import config_string +from neps.space.new_space import config_string, space class HNASLikePipeline(space.Pipeline): - """ - Based on the `hierarchical+shared` variant (cell block is shared everywhere). + """Based on the `hierarchical+shared` variant (cell block is shared everywhere). Across _CONVBLOCK items, _ACT and _CONV also shared. Only the _NORM changes. Additionally, this variant now has a PReLU operation with a float hyperparameter (init). @@ -224,29 +224,119 @@ def test_hnas_like_string(): def test_hnas_like_context(): samplings_to_make = { "Resolvable.CL.args[0].resampled_categorical::categorical__4": 3, - "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[0]::categorical__4": 0, - "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[1]::categorical__3": 2, - "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": 0, + "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[0]::categorical__4": ( + 0 + ), + "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[1]::categorical__3": ( + 2 + ), + "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": ( + 0 + ), "Resolvable.CL.args[1].resampled_categorical::categorical__4": 0, "Resolvable.CL.args[2].resampled_categorical::categorical__4": 1, "Resolvable.CL.args[3].resampled_categorical::categorical__4": 2, 
"Resolvable.CL.args[4].resampled_categorical::categorical__4": 3, - "Resolvable.CL.args[4].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": 2, + "Resolvable.CL.args[4].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": ( + 2 + ), "Resolvable.CL.args[5].resampled_categorical::categorical__4": 0, "Resolvable.ARCH::categorical__3": 1, "Resolvable.ARCH.sampled_value.args[0].resampled_categorical::categorical__3": 2, - "Resolvable.ARCH.sampled_value.args[0].resampled_categorical.sampled_value.args[0].resampled_categorical::categorical__3": 2, - "Resolvable.ARCH.sampled_value.args[0].resampled_categorical.sampled_value.args[1].resampled_categorical::categorical__3": 0, + "Resolvable.ARCH.sampled_value.args[0].resampled_categorical.sampled_value.args[0].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.ARCH.sampled_value.args[0].resampled_categorical.sampled_value.args[1].resampled_categorical::categorical__3": ( + 0 + ), "Resolvable.ARCH.sampled_value.args[1].resampled_categorical::categorical__3": 2, - "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[0].resampled_categorical::categorical__3": 0, - "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[1].resampled_categorical::categorical__3": 0, - "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[2].resampled_categorical::categorical__3": 0, - "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[3].resampled_categorical::categorical__3": 1, + "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[0].resampled_categorical::categorical__3": ( + 0 + ), + "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[1].resampled_categorical::categorical__3": ( + 0 + ), + 
"Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[2].resampled_categorical::categorical__3": ( + 0 + ), + "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[3].resampled_categorical::categorical__3": ( + 1 + ), "Resolvable.ARCH.sampled_value.args[2].resampled_categorical::categorical__3": 2, } - expected_cl_config_string = "(CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))" - expected_arch_config_string = "(D2 Sequential3 (D0 Residual3 (C Residual2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (C Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV 
dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (D1 Residual3 (C Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (C Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (DOWN Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock) (DOWN Sequential3 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock)) (D1 Residual3 (C Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 
(ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (C Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (DOWN Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock) (DOWN Sequential3 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock)))" + expected_cl_config_string = ( + "(CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3)" + " (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" + " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))" + ) + expected_arch_config_string = ( + "(D2 Sequential3 (D0 Residual3 (C Residual2 (CELL Cell (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) 
(CONV dconv3x3) (NORM batch))) (OPS zero)" + " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" + " (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" + " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" + " (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" + " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" + " (CONV dconv3x3) (NORM layer))) (OPS zero))) (C Sequential2 (CELL Cell (OPS" + " Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch)))" + " (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT" + " relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" + " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" + " (CONV dconv3x3) (NORM layer))) (OPS zero))) (CELL Cell (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" + " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" + " (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" + " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" + " (CONV dconv3x3) (NORM layer))) (OPS zero))) (D1 Residual3 (C Sequential2 (CELL" + " Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM" + " batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" + " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell" + " (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM" + " batch))) (OPS zero) (OPS id) 
(OPS avg_pool) (OPS Sequential1 (CONVBLOCK" + " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (C" + " Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV" + " dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))" + " (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3)" + " (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" + " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (DOWN" + " Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV" + " dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))" + " resBlock) (DOWN Sequential3 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3" + " (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool)" + " (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM" + " layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT" + " relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS" + " Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer)))" + " (OPS zero)) resBlock)) (D1 Residual3 (C Sequential2 (CELL Cell (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" + " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" + " (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" + " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" + " (CONV dconv3x3) (NORM layer))) (OPS zero))) (C Sequential2 (CELL Cell (OPS" + " Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch)))" + " 
(OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT" + " relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" + " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" + " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" + " (CONV dconv3x3) (NORM layer))) (OPS zero))) (DOWN Sequential2 (CELL Cell (OPS" + " Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch)))" + " (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT" + " relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock) (DOWN Sequential3" + " (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3)" + " (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" + " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell" + " (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM" + " batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" + " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock)))" + ) pipeline = HNASLikePipeline() @@ -279,21 +369,3 @@ def test_hnas_like_context(): assert arch_config_string assert arch_config_string == expected_arch_config_string assert cl_config_string in arch_config_string - - # print() - # print("Sampled CELL: " + cl_config_string) - # print("Sampled ARCH: " + arch_config_string) - # print("Sampled values:") - # import pprint - # - # # pprint.pp(sampled_values, indent=2, compact=True) - # - # print() - # - # print("ARCH received:") - # pretty_config = config_string.ConfigString(arch_config_string).pretty_format() - # print(pretty_config) - # - # print("Arch expected:") - # pretty_config = config_string.ConfigString(expected_arch_config_string).pretty_format() - # print(pretty_config) diff --git a/neps/space/new_space/tests/test_search_space__nos_like.py 
b/tests/test_neps_space/test_search_space__nos_like.py similarity index 86% rename from neps/space/new_space/tests/test_search_space__nos_like.py rename to tests/test_neps_space/test_search_space__nos_like.py index 365513639..5e48663a4 100644 --- a/neps/space/new_space/tests/test_search_space__nos_like.py +++ b/tests/test_neps_space/test_search_space__nos_like.py @@ -1,14 +1,11 @@ -# import nosbench -# from nosbench.program import Program, Instruction, Pointer -# from nosbench.function import Function +from __future__ import annotations import pytest -from neps.space.new_space import space -from neps.space.new_space import config_string +from neps.space.new_space import config_string, space -class nosBench(space.Pipeline): +class NosBench(space.Pipeline): _UNARY_FUN = space.Categorical( choices=( space.Operation(operator="Square"), @@ -111,7 +108,7 @@ class nosBench(space.Pipeline): @pytest.mark.repeat(500) def test_resolve(): - pipeline = nosBench() + pipeline = NosBench() try: resolved_pipeline, resolution_context = space.resolve(pipeline) @@ -124,13 +121,3 @@ def test_resolve(): assert p_config_string pretty_config = config_string.ConfigString(p_config_string).pretty_format() assert pretty_config - - print() - print("Config string:") - print(pretty_config) - - # print() - # print("Samplings made:") - # import pprint - # - # pprint.pp(resolution_context.samplings_made, indent=2) diff --git a/neps/space/new_space/tests/test_search_space__recursion.py b/tests/test_neps_space/test_search_space__recursion.py similarity index 88% rename from neps/space/new_space/tests/test_search_space__recursion.py rename to tests/test_neps_space/test_search_space__recursion.py index 6ea47a690..720d07f53 100644 --- a/neps/space/new_space/tests/test_search_space__recursion.py +++ b/tests/test_neps_space/test_search_space__recursion.py @@ -1,14 +1,21 @@ -from typing import Callable, Sequence +from __future__ import annotations + +from collections.abc import Callable, Sequence from 
neps.space.new_space import space class Model: + """An inner function that sums the values and multiplies the result by a factor. + This class can be recursively used in a search space to create nested models. + """ + def __init__( self, inner_function: Callable[[Sequence[float]], float], factor: float, ): + """Initialize the model with an inner function and a factor.""" self.inner_function = inner_function self.factor = factor @@ -17,6 +24,8 @@ def __call__(self, values: Sequence[float]) -> float: class Sum: + """A simple inner function that sums the values.""" + def __call__(self, values: Sequence[float]) -> float: return sum(values) diff --git a/neps/space/new_space/tests/test_search_space__resampled.py b/tests/test_neps_space/test_search_space__resampled.py similarity index 93% rename from neps/space/new_space/tests/test_search_space__resampled.py rename to tests/test_neps_space/test_search_space__resampled.py index da16fd045..b0e33190c 100644 --- a/neps/space/new_space/tests/test_search_space__resampled.py +++ b/tests/test_neps_space/test_search_space__resampled.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from neps.space.new_space import space @@ -162,7 +164,9 @@ def test_resampled_float(): assert prelu_init_value == prelu_shared2 assert len(set(resampled_values)) == len(resampled_values) - assert all(resampled_value != prelu_init_value for resampled_value in resampled_values) + assert all( + resampled_value != prelu_init_value for resampled_value in resampled_values + ) @pytest.mark.repeat(200) @@ -202,14 +206,16 @@ def test_resampled_integer(): assert prelu_init_value == prelu_shared2 assert len(set(resampled_values)) == len(resampled_values) - assert all(resampled_value != prelu_init_value for resampled_value in resampled_values) + assert all( + resampled_value != prelu_init_value for resampled_value in resampled_values + ) act = resolved_pipeline.act act_args = tuple(op.kwargs["init"] for op in act.args) sampled_values = 
(prelu_shared1, prelu_shared2, *resampled_values) assert len(act_args) == len(sampled_values) - for act_arg, sampled_value in zip(act_args, sampled_values): + for act_arg, sampled_value in zip(act_args, sampled_values, strict=False): assert act_arg is sampled_value act_resampled_prelu_shared = act.kwargs["prelu_shared"].kwargs["init"] @@ -228,7 +234,9 @@ def test_resampled_integer(): act_resampled_hp_value = act.kwargs["resampled_hp_value"] assert isinstance(act_resampled_hp_value, int) assert act_resampled_hp_value != prelu_init_value - assert all(resampled_value != act_resampled_hp_value for resampled_value in resampled_values) + assert all( + resampled_value != act_resampled_hp_value for resampled_value in resampled_values + ) @pytest.mark.repeat(200) @@ -257,17 +265,17 @@ def test_resampled_categorical(): assert isinstance(op2, space.Operation) assert (op1 is conv_block) or (op1.operator == "op1") - assert op2.operator == "conv1" or op2.operator == "conv2" or op2.operator == "op2" + assert op2.operator in ("conv1", "conv2", "op2") cell = resolved_pipeline.cell assert cell is not pipeline.cell cell_args1 = cell.args[0] cell_args2 = cell.args[1] - cell_args3 = cell.args[2] - cell_args4 = cell.args[3] - cell_args5 = cell.args[4] - cell_args6 = cell.args[5] + cell.args[2] + cell.args[3] + cell.args[4] + cell.args[5] assert cell_args1 is op1 assert cell_args2 is op2 diff --git a/neps/space/new_space/tests/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py similarity index 93% rename from neps/space/new_space/tests/test_search_space__reuse_arch_elements.py rename to tests/test_neps_space/test_search_space__reuse_arch_elements.py index 80208dc8d..50fe5ec24 100644 --- a/neps/space/new_space/tests/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from neps.space.new_space import space 
@@ -149,7 +151,11 @@ def test_nested_complex(): resolved_pipeline, _resolution_context = space.resolve(pipeline) assert resolved_pipeline is not None - assert tuple(resolved_pipeline.get_attrs().keys()) == ("prelu_init_value", "prelu", "act") + assert tuple(resolved_pipeline.get_attrs().keys()) == ( + "prelu_init_value", + "prelu", + "act", + ) prelu_init_value = resolved_pipeline.prelu_init_value assert 0.1 <= prelu_init_value <= 0.9 @@ -180,7 +186,11 @@ def test_nested_complex_string(): expected_ending = "})" assert act_config_string.startswith(expected_prefix) assert act_config_string.endswith(expected_ending) - assert 0.1 <= float(act_config_string[len(expected_prefix) : -len(expected_ending)]) <= 0.9 + assert ( + 0.1 + <= float(act_config_string[len(expected_prefix) : -len(expected_ending)]) + <= 0.9 + ) def test_fixed_pipeline(): @@ -189,7 +199,9 @@ def test_fixed_pipeline(): resolved_pipeline, _resolution_context = space.resolve(pipeline) assert resolved_pipeline is not None - assert tuple(resolved_pipeline.get_attrs().keys()) == tuple(pipeline.get_attrs().keys()) + assert tuple(resolved_pipeline.get_attrs().keys()) == tuple( + pipeline.get_attrs().keys() + ) assert resolved_pipeline.prelu_init_value == pipeline.prelu_init_value assert resolved_pipeline.prelu is pipeline.prelu @@ -223,13 +235,13 @@ def test_simple_reuse(): ) conv_choices_prior_index = resolved_pipeline.conv_choices_prior_index - assert conv_choices_prior_index == 0 or conv_choices_prior_index == 1 + assert conv_choices_prior_index in (0, 1) conv_choices_prior_confidence = resolved_pipeline.conv_choices_prior_confidence assert conv_choices_prior_confidence in _conv_choices_prior_confidence_choices conv_choices = resolved_pipeline.conv_choices - assert conv_choices == _conv_choices_low or conv_choices == _conv_choices_high + assert conv_choices in (_conv_choices_low, _conv_choices_high) conv_block = resolved_pipeline.conv_block assert conv_block.operator == "sequential3" @@ -378,10 +390,14 @@ 
def test_shared_complex_context(): # the second resolution should give us a new object assert resolved_pipeline_second is not resolved_pipeline_first - expected_config_string: str = ( - "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (zero) (avg_pool) (zero) (avg_pool) (zero))" - ) + expected_config_string: str = "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (zero) (avg_pool) (zero) (avg_pool) (zero))" # however, their final results should be the same thing - assert space.convert_operation_to_string(resolved_pipeline_first.cell) == expected_config_string - assert space.convert_operation_to_string(resolved_pipeline_second.cell) == expected_config_string + assert ( + space.convert_operation_to_string(resolved_pipeline_first.cell) + == expected_config_string + ) + assert ( + space.convert_operation_to_string(resolved_pipeline_second.cell) + == expected_config_string + ) diff --git a/neps/space/new_space/tests/utils.py b/tests/test_neps_space/utils.py similarity index 81% rename from neps/space/new_space/tests/utils.py rename to tests/test_neps_space/utils.py index a548dfb64..7090b0146 100644 --- a/neps/space/new_space/tests/utils.py +++ b/tests/test_neps_space/utils.py @@ -1,5 +1,6 @@ -import pprint -from typing import Callable +from __future__ import annotations + +from collections.abc import Callable from neps.space.new_space import space @@ -8,7 +9,6 @@ def generate_possible_config_strings( pipeline: space.Pipeline, resolved_pipeline_attr_getter: Callable[[space.Pipeline], space.Operation], num_resolutions: int = 50_000, - display: bool = True, ): result = set() @@ -18,7 +18,4 @@ def generate_possible_config_strings( config_string = space.convert_operation_to_string(attr) result.add(config_string) - if display: - pprint.pprint(result, indent=2) - return result From ceccae30a1c8c244960ff298ab21a98a3fdc87ba Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 2 Jul 2025 01:58:31 +0200 Subject: [PATCH 009/156] Refactor NEPS space imports and update test cases to use 
neps_space module - Changed all instances of `space` to `neps_space` in test files to reflect the new import structure. - Updated class definitions to inherit from `neps_space.Pipeline` instead of `space.Pipeline`. - Modified the creation of operations and categorical choices to use `neps_space.Operation` and `neps_space.Categorical`. - Adjusted the resolution and conversion functions to utilize the new `neps_space` methods. - Ensured consistency across all test cases for the new structure and functionality. --- neps/api.py | 3 +- neps/optimizers/__init__.py | 9 +- neps/optimizers/algorithms.py | 3 +- .../{new_space => neps_spaces}/__init__.py | 0 .../bracket_optimizer.py | 85 +- .../config_string.py | 113 ++- .../space.py => neps_spaces/neps_space.py} | 946 ++++++++++++++++-- neps/space/neps_spaces/optimizers/__init__.py | 0 .../optimizers}/priorband.py | 71 +- neps/space/parsing.py | 2 +- .../test_neps_space/test_domain__centering.py | 62 +- .../test_neps_space/test_neps_integration.py | 120 +-- ...st_neps_integration_priorband__max_cost.py | 28 +- ...t_neps_integration_priorband__max_evals.py | 28 +- .../test_search_space__fidelity.py | 28 +- .../test_search_space__grammar_like.py | 174 ++-- .../test_search_space__hnas_like.py | 182 ++-- .../test_search_space__nos_like.py | 102 +- .../test_search_space__recursion.py | 18 +- .../test_search_space__resampled.py | 88 +- .../test_search_space__reuse_arch_elements.py | 120 +-- tests/test_neps_space/utils.py | 10 +- 22 files changed, 1522 insertions(+), 670 deletions(-) rename neps/space/{new_space => neps_spaces}/__init__.py (100%) rename neps/space/{new_space => neps_spaces}/bracket_optimizer.py (77%) rename neps/space/{new_space => neps_spaces}/config_string.py (65%) rename neps/space/{new_space/space.py => neps_spaces/neps_space.py} (53%) create mode 100644 neps/space/neps_spaces/optimizers/__init__.py rename neps/space/{new_space => neps_spaces/optimizers}/priorband.py (74%) diff --git a/neps/api.py b/neps/api.py 
index 77c8ebcf2..955d3a09e 100644 --- a/neps/api.py +++ b/neps/api.py @@ -19,6 +19,7 @@ from neps.optimizers.algorithms import CustomOptimizer from neps.space import Parameter, SearchSpace + from neps.space.neps_spaces.neps_space import Pipeline from neps.state import EvaluatePipelineReturn logger = logging.getLogger(__name__) @@ -47,7 +48,7 @@ def run( # noqa: PLR0913 OptimizerChoice | Mapping[str, Any] | tuple[OptimizerChoice, Mapping[str, Any]] - | Callable[Concatenate[SearchSpace, ...], AskFunction] + | Callable[Concatenate[SearchSpace | Pipeline, ...], AskFunction] | CustomOptimizer | Literal["auto"] ) = "auto", diff --git a/neps/optimizers/__init__.py b/neps/optimizers/__init__.py index 9b2e7f34e..3c747104d 100644 --- a/neps/optimizers/__init__.py +++ b/neps/optimizers/__init__.py @@ -10,10 +10,10 @@ determine_optimizer_automatically, ) from neps.optimizers.optimizer import AskFunction, OptimizerInfo +from neps.space.neps_spaces.neps_space import Pipeline from neps.utils.common import extract_keyword_defaults if TYPE_CHECKING: - from neps.space.new_space.space import Pipeline from neps.space import SearchSpace @@ -48,7 +48,7 @@ def load_optimizer( OptimizerChoice | Mapping[str, Any] | tuple[OptimizerChoice, Mapping[str, Any]] - | Callable[Concatenate[SearchSpace, ...], AskFunction] + | Callable[Concatenate[SearchSpace | Pipeline, ...], AskFunction] | CustomOptimizer | Literal["auto"] ), @@ -57,6 +57,11 @@ def load_optimizer( match optimizer: # Predefined string (including "auto") case str(): + if isinstance(space, Pipeline): + raise ValueError( + "String optimizers are not yet available for NePS spaces." 
+ ) + return _load_optimizer_from_string(optimizer, space) # Predefined string with kwargs diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 6663388a5..3d90e8bb0 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -43,6 +43,7 @@ from neps.optimizers.utils.brackets import Bracket from neps.space import SearchSpace + from neps.space.neps_spaces.neps_space import Pipeline logger = logging.getLogger(__name__) @@ -1162,7 +1163,7 @@ class CustomOptimizer: kwargs: Mapping[str, Any] = field(default_factory=dict) initialized: bool = False - def create(self, space: SearchSpace) -> AskFunction: + def create(self, space: SearchSpace | Pipeline) -> AskFunction: assert not self.initialized, "Custom optimizer already initialized." return self.optimizer(space, **self.kwargs) # type: ignore diff --git a/neps/space/new_space/__init__.py b/neps/space/neps_spaces/__init__.py similarity index 100% rename from neps/space/new_space/__init__.py rename to neps/space/neps_spaces/__init__.py diff --git a/neps/space/new_space/bracket_optimizer.py b/neps/space/neps_spaces/bracket_optimizer.py similarity index 77% rename from neps/space/new_space/bracket_optimizer.py rename to neps/space/neps_spaces/bracket_optimizer.py index 6c0508400..1d8cc5f82 100644 --- a/neps/space/new_space/bracket_optimizer.py +++ b/neps/space/neps_spaces/bracket_optimizer.py @@ -1,18 +1,25 @@ +"""This module provides multi-fidelity optimizers for NePS spaces. +It implements a bracket-based optimization strategy that samples configurations +from a prior band, allowing for efficient exploration of the search space. +It supports different bracket types such as successive halving, hyperband, ASHA, +and async hyperband, and can sample configurations at different fidelity levels. 
+""" + from __future__ import annotations import logging from collections.abc import Callable, Mapping, Sequence from dataclasses import dataclass from functools import partial -from typing import TYPE_CHECKING, Literal, Any +from typing import TYPE_CHECKING, Any, Literal import pandas as pd +import neps.optimizers.bracket_optimizer as standard_bracket_optimizer from neps.optimizers.optimizer import SampledConfig -from neps.space.new_space.priorband import PriorBandSampler from neps.optimizers.utils.brackets import PromoteAction, SampleAction -import neps.space.new_space.space as new_space -import neps.optimizers.bracket_optimizer as standard_bracket_optimizer +from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces.optimizers.priorband import PriorBandSampler if TYPE_CHECKING: from neps.optimizers.utils.brackets import Bracket @@ -27,7 +34,7 @@ class _BracketOptimizer: """The pipeline space to optimize over.""" - space: new_space.Pipeline + space: neps_space.Pipeline """Whether or not to sample the prior first. 
@@ -48,10 +55,10 @@ class _BracketOptimizer: """The sampler used to generate new trials.""" sampler: PriorBandSampler - def __call__( # noqa: C901, PLR0912 + def __call__( # noqa: C901 self, trials: Mapping[str, Trial], - budget_info: BudgetInfo | None, + budget_info: BudgetInfo | None, # noqa: ARG002 n: int | None = None, ) -> SampledConfig | list[SampledConfig]: assert n is None, "TODO" @@ -92,7 +99,11 @@ def __call__( # noqa: C901, PLR0912 brackets = [brackets] next_action = next( - (action for bracket in brackets if (action := bracket.next()) not in ("done", "pending")), + ( + action + for bracket in brackets + if (action := bracket.next()) not in ("done", "pending") + ), None, ) @@ -132,8 +143,8 @@ def _sample_prior( fidelity_level: Literal["min"] | Literal["max"], ) -> dict[str, Any]: # TODO: [lum] have a CenterSampler as fallback, not Random - _try_always_priors_sampler = new_space.PriorOrFallbackSampler( - fallback_sampler=new_space.RandomSampler(predefined_samplings={}), + _try_always_priors_sampler = neps_space.PriorOrFallbackSampler( + fallback_sampler=neps_space.RandomSampler(predefined_samplings={}), prior_use_probability=1, ) @@ -147,47 +158,63 @@ def _sample_prior( else: raise ValueError(f"Invalid fidelity level {fidelity_level}") - _resolved_pipeline, resolution_context = new_space.resolve( + _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, domain_sampler=_try_always_priors_sampler, environment_values=_environment_values, ) - config = new_space.NepsCompatConverter.to_neps_config(resolution_context) + config = neps_space.NepsCompatConverter.to_neps_config(resolution_context) return dict(**config) def _convert_to_another_rung( self, - config: dict[str, Any], + config: Mapping[str, Any], rung: int, ) -> dict[str, Any]: - data = new_space.NepsCompatConverter.from_neps_config(config=config) + data = neps_space.NepsCompatConverter.from_neps_config(config=config) _environment_values = {} _fidelity_attrs = 
self.space.fidelity_attrs assert len(_fidelity_attrs) == 1, "TODO: [lum]" - for fidelity_name, fidelity_obj in _fidelity_attrs.items(): + for fidelity_name, _fidelity_obj in _fidelity_attrs.items(): _environment_values[fidelity_name] = self.rung_to_fid[rung] - _resolved_pipeline, resolution_context = new_space.resolve( + _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, - domain_sampler=new_space.OnlyPredefinedValuesSampler( + domain_sampler=neps_space.OnlyPredefinedValuesSampler( predefined_samplings=data.predefined_samplings, ), environment_values=_environment_values, ) - config = new_space.NepsCompatConverter.to_neps_config(resolution_context) + config = neps_space.NepsCompatConverter.to_neps_config(resolution_context) return dict(**config) def priorband( - space: new_space.Pipeline, + space: neps_space.Pipeline, *, eta: int = 3, sample_prior_first: bool | Literal["highest_fidelity"] = False, base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", ) -> _BracketOptimizer: + """Create a PriorBand optimizer for the given pipeline space. + + Args: + space: The pipeline space to optimize over. + eta: The eta parameter for the algorithm. + sample_prior_first: Whether to sample the prior first. + If set to `"highest_fidelity"`, the prior will be sampled at the + highest fidelity, otherwise at the lowest fidelity. + base: The type of bracket optimizer to use. One of: + - "successive_halving" + - "hyperband" + - "asha" + - "async_hb" + Returns: + An instance of _BracketOptimizer configured for PriorBand sampling. 
+ """ return _bracket_optimizer( pipeline_space=space, bracket_type=base, @@ -198,8 +225,8 @@ def priorband( ) -def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 - pipeline_space: new_space.Pipeline, +def _bracket_optimizer( + pipeline_space: neps_space.Pipeline, *, bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], eta: int, @@ -207,16 +234,20 @@ def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 sample_prior_first: bool | Literal["highest_fidelity"], early_stopping_rate: int | None, ) -> _BracketOptimizer: - fidelity_attrs = pipeline_space.fidelity_attrs if len(fidelity_attrs) != 1: - raise ValueError("Only one fidelity should be defined in the pipeline space." f"\nGot: {fidelity_attrs!r}") + raise ValueError( + "Only one fidelity should be defined in the pipeline space." + f"\nGot: {fidelity_attrs!r}" + ) - fidelity_name, fidelity_obj = list(fidelity_attrs.items())[0] + fidelity_name, fidelity_obj = next(iter(fidelity_attrs.items())) if sample_prior_first not in (True, False, "highest_fidelity"): - raise ValueError("sample_prior_first should be either True, False or 'highest_fidelity'") + raise ValueError( + "sample_prior_first should be either True, False or 'highest_fidelity'" + ) from neps.optimizers.utils import brackets @@ -281,7 +312,9 @@ def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 _sampler = PriorBandSampler( space=pipeline_space, eta=eta, - early_stopping_rate=(early_stopping_rate if early_stopping_rate is not None else 0), + early_stopping_rate=( + early_stopping_rate if early_stopping_rate is not None else 0 + ), fid_bounds=(fidelity_obj.min_value, fidelity_obj.max_value), ) case _: diff --git a/neps/space/new_space/config_string.py b/neps/space/neps_spaces/config_string.py similarity index 65% rename from neps/space/new_space/config_string.py rename to neps/space/neps_spaces/config_string.py index 6399fbbf7..8e363f581 100644 --- a/neps/space/new_space/config_string.py +++ 
b/neps/space/neps_spaces/config_string.py @@ -1,28 +1,46 @@ +"""This module provides functionality to unwrap and wrap configuration strings +used in NePS spaces. It defines the `UnwrappedConfigStringPart` data class +to represent parts of the unwrapped configuration string and provides +functions to unwrap a configuration string into these parts and to wrap +unwrapped parts back into a configuration string. +""" + from __future__ import annotations import dataclasses import functools +from collections.abc import Callable +from typing import Any @dataclasses.dataclass(frozen=True) class UnwrappedConfigStringPart: + """A data class representing a part of an unwrapped configuration string. + :param level: The hierarchy level of this part in the configuration string. + :param opening_index: The index of the opening parenthesis in the original string. + :param operator: The operator of this part, which is the first word in the + parenthesis. + :param hyperparameters: The hyperparameters of this part, if any, enclosed in curly + braces. + :param operands: The operands of this part, which are the remaining content in the + parenthesis. + """ + level: int opening_index: int - operator: str + operator: str | Callable[..., Any] hyperparameters: str operands: str @functools.lru_cache(maxsize=2000) -def unwrap_config_string(config_string: str) -> tuple[UnwrappedConfigStringPart]: - """ - For a given config string, gets the parenthetic contents of it +def unwrap_config_string(config_string: str) -> tuple[UnwrappedConfigStringPart, ...]: + """For a given config string, gets the parenthetic contents of it and uses them to construct objects of type `UnwrappedConfigStringPart`. First unwraps a given parenthesised config_string into parts. Then it converts these parts into objects with structured information. """ - # A workaround needed since in the existing configurations # generated by previous methods, e.g. 
the `resBlock resBlock` and `resBlock` items # occur without wrapping parenthesis, differently from other items. @@ -53,13 +71,10 @@ def unwrap_config_string(config_string: str) -> tuple[UnwrappedConfigStringPart] start_char_index, opening_index = stack.pop() level = len(stack) + 1 # start level counting from 1 and not 0 - value = config_string[start_char_index + 1 : current_char_index] - value = value.split(" (", maxsplit=1) + value_single = config_string[start_char_index + 1 : current_char_index] + value = value_single.split(" (", maxsplit=1) operator = value[0] - if len(value) > 1: - operands = "(" + value[1] - else: - operands = "" + operands = "(" + value[1] if len(value) > 1 else "" if " {" in operator: operator, hyperparameters = operator.split(" {") @@ -77,8 +92,7 @@ def unwrap_config_string(config_string: str) -> tuple[UnwrappedConfigStringPart] result.append(item) assert not stack, f"For '(' found no matching ')': Index: {stack[0][0]}" - result = tuple(sorted(result, key=lambda x: x.opening_index)) - return result + return tuple(sorted(result, key=lambda x: x.opening_index)) # Current profiling shows this function does not run that often @@ -87,12 +101,11 @@ def wrap_config_into_string( unwrapped_config: tuple[UnwrappedConfigStringPart, ...], max_level: int | None = None, ) -> str: - """ - For a given unwrapped config, returns the string representing it. + """For a given unwrapped config, returns the string representing it. :param unwrapped_config: The unwrapped config :param max_level: An optional int telling which is the maximal considered level. - Bigger levels are ignored + Bigger levels are ignored. """ result = [] current_level = 0 @@ -113,7 +126,7 @@ def wrap_config_into_string( result.append(value) result.append(")" * current_level) - result = "".join(result).strip() + result_string = "".join(result).strip() # A workaround needed since in the existing configurations # generated by previous methods, e.g. 
the `resBlock resBlock` and `resBlock` items @@ -126,30 +139,46 @@ def wrap_config_into_string( ("id", False), ] for op, replace_individual in replacements: - result = result.replace(f"({op} {op})", "__TMP_PLACEHOLDER___") + result_string = result_string.replace(f"({op} {op})", "__TMP_PLACEHOLDER___") if replace_individual: - result = result.replace(f"({op})", f"{op}") - result = result.replace("__TMP_PLACEHOLDER___", f"{op} {op}") + result_string = result_string.replace(f"({op})", f"{op}") + result_string = result_string.replace("__TMP_PLACEHOLDER___", f"{op} {op}") - return result + return result_string class ConfigString: + """A class representing a configuration string in NePS spaces. + It provides methods to unwrap the configuration string into structured parts, + retrieve the maximum hierarchy level, and get a representation of the configuration + at a specific hierarchy level. + """ + def __init__(self, config_string: str) -> None: + """Initialize the ConfigString with a given configuration string. + :param config_string: The configuration string to be wrapped. + :raises ValueError: If the config_string is None or empty. + """ if config_string is None or len(config_string) == 0: raise ValueError(f"Invalid config string: {config_string}") self.config_string = config_string # The fields below are needed for lazy and cached evaluation. # In python 3.8+ can be replaced by `cached_property` - self._unwrapped: tuple[UnwrappedConfigStringPart] | None = None + self._unwrapped: tuple[UnwrappedConfigStringPart, ...] | None = None self._max_hierarchy_level: int | None = None # a cache for the different hierarchy levels of this config string self._at_hierarchy_level_cache: dict[int, ConfigString] = {} @property - def unwrapped(self) -> tuple[UnwrappedConfigStringPart]: + def unwrapped(self) -> tuple[UnwrappedConfigStringPart, ...]: + """Get the unwrapped representation of the configuration string. 
+ :return: A tuple of UnwrappedConfigStringPart objects representing the unwrapped + config. + :raises ValueError: If there is an error unwrapping the config string. + """ + # If the unwrapped is already cached, return it if self._unwrapped is not None: return self._unwrapped @@ -172,33 +201,39 @@ def unwrapped(self) -> tuple[UnwrappedConfigStringPart]: @property def max_hierarchy_level(self) -> int: + """Get the maximum hierarchy level of the configuration string. + :return: The maximum hierarchy level of the configuration string. + :raises ValueError: If the maximum hierarchy level is invalid. + """ if self._max_hierarchy_level is not None: return self._max_hierarchy_level max_hierarchy_level = max(i.level for i in self.unwrapped) - assert max_hierarchy_level > 0, f"Invalid max hierarchy level: {self.max_hierarchy_level}" + assert max_hierarchy_level > 0, ( + f"Invalid max hierarchy level: {self.max_hierarchy_level}" + ) self._max_hierarchy_level = max_hierarchy_level return self._max_hierarchy_level def at_hierarchy_level(self, level: int) -> ConfigString: - """ - Get a representation of this config at the chosen hierarchy level. - :param level: - When >0, get the config the that hierarchy level. - When <0, get the config at (max_hierarchy_level - level + 1), - similar to negative python indices in e.g. lists. - :return: + """Get the configuration string at a specific hierarchy level. + :param level: The hierarchy level to retrieve the configuration string for. + :return: A ConfigString object representing the configuration at the specified + hierarchy level. + :raises ValueError: If the level is invalid (0 or out of bounds). """ if level == 0: raise ValueError(f"Invalid value for `level`. Received level == 0: {level}") if level > self.max_hierarchy_level: raise ValueError( - "Invalid value for `level`. " + f"level>max_hierarchy_level: {level}>{self.max_hierarchy_level}" + "Invalid value for `level`. 
" + + f"level>max_hierarchy_level: {level}>{self.max_hierarchy_level}" ) if level < -self.max_hierarchy_level: raise ValueError( - "Invalid value for `level`. " + f"level<-max_hierarchy_level: {level}<-{self.max_hierarchy_level}" + "Invalid value for `level`. " + + f"level<-max_hierarchy_level: {level}<-{self.max_hierarchy_level}" ) if level < 0: @@ -209,14 +244,22 @@ def at_hierarchy_level(self, level: int) -> ConfigString: if level in self._at_hierarchy_level_cache: return self._at_hierarchy_level_cache[level] - config_string_at_hierarchy_level = wrap_config_into_string(unwrapped_config=self.unwrapped, max_level=level) + config_string_at_hierarchy_level = wrap_config_into_string( + unwrapped_config=self.unwrapped, max_level=level + ) config_at_hierarchy_level = ConfigString(config_string_at_hierarchy_level) self._at_hierarchy_level_cache[level] = config_at_hierarchy_level return self._at_hierarchy_level_cache[level] def pretty_format(self) -> str: - format_str_with_kwargs = "{indent}{item.level:0>2d} :: {item.operator} {item.hyperparameters}" + """Get a pretty formatted string representation of the configuration string. + :return: A string representation of the configuration string with indentation + based on the hierarchy level of each part. + """ + format_str_with_kwargs = ( + "{indent}{item.level:0>2d} :: {item.operator} {item.hyperparameters}" + ) format_str_no_kwargs = "{indent}{item.level:0>2d} :: {item.operator}" lines = [self.config_string] for item in self.unwrapped: diff --git a/neps/space/new_space/space.py b/neps/space/neps_spaces/neps_space.py similarity index 53% rename from neps/space/new_space/space.py rename to neps/space/neps_spaces/neps_space.py index 3d85a6745..56975c2bd 100644 --- a/neps/space/new_space/space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -1,32 +1,37 @@ +"""This module defines various classes and protocols for representing and manipulating +search spaces in NePS (Neural Parameter Search). 
It includes definitions for domains, +pipelines, operations, and fidelity, as well as utilities for sampling and resolving +search spaces. +""" + from __future__ import annotations import abc +import contextlib import dataclasses +import enum import functools import heapq -import random import math -import enum -import contextlib +import random +from collections.abc import Callable, Generator, Mapping, Sequence from typing import ( - TypeVar, - Generic, - Sequence, + TYPE_CHECKING, Any, + Generic, Protocol, - runtime_checkable, + TypeVar, cast, - Callable, - Mapping, - Generator, - Type, + runtime_checkable, ) -import neps.space.new_space.config_string as config_string -import neps.optimizers.optimizer as optimizer -import neps.state.trial as trial_state -import neps.state.optimizer as optimizer_state +from neps.optimizers import optimizer +from neps.space.neps_spaces import config_string +if TYPE_CHECKING: + import neps.state.optimizer as optimizer_state + import neps.state.trial as trial_state + from neps.state.trial import Trial T = TypeVar("T") P = TypeVar("P", bound="Pipeline") @@ -47,26 +52,45 @@ class _Unset: @runtime_checkable class Resolvable(Protocol): + """A protocol for objects that can be resolved into attributes.""" + def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the resolvable object as a mapping.""" raise NotImplementedError() def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: + """Create a new resolvable object from the given attributes.""" raise NotImplementedError() def resolvable_is_fully_resolved(resolvable: Resolvable) -> bool: + """Check if a resolvable object is fully resolved. + A resolvable object is considered fully resolved if all its attributes are either + not instances of Resolvable or are themselves fully resolved. 
+ """ attr_objects = resolvable.get_attrs().values() - return all(not isinstance(obj, Resolvable) or resolvable_is_fully_resolved(obj) for obj in attr_objects) + return all( + not isinstance(obj, Resolvable) or resolvable_is_fully_resolved(obj) + for obj in attr_objects + ) @runtime_checkable class DomainSampler(Protocol): + """A protocol for domain samplers that can sample from a given domain.""" + def __call__( self, *, domain_obj: Domain[T], current_path: str, ) -> T: + """Sample a value from the given domain. + :param domain_obj: The domain object to sample from. + :param current_path: The current path in the resolution context. + :return: A sampled value of type T from the domain. + :raises NotImplementedError: If the method is not implemented. + """ raise NotImplementedError() @@ -74,11 +98,25 @@ def __call__( class Pipeline(Resolvable): + """A class representing a pipeline in NePS spaces. + It contains attributes that can be resolved into a configuration string, + and it can be used to sample configurations based on defined domains. + """ + @property def fidelity_attrs(self) -> Mapping[str, Fidelity]: + """Get the fidelity attributes of the pipeline. Fidelity attributes are special + attributes that represent the fidelity of the pipeline. + :return: A mapping of fidelity attribute names to Fidelity objects. + """ return {k: v for k, v in self.get_attrs().items() if isinstance(v, Fidelity)} def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the pipeline as a mapping. + This method collects all attributes of the pipeline class and instance, + excluding private attributes and methods, and returns them as a dictionary. + :return: A mapping of attribute names to their values. + """ attrs = {} for attr_name, attr_value in vars(self.__class__).items(): @@ -98,6 +136,12 @@ def get_attrs(self) -> Mapping[str, Any]: return attrs def from_attrs(self, attrs: Mapping[str, Any]) -> Pipeline: + """Create a new Pipeline instance from the given attributes. 
+ :param attrs: A mapping of attribute names to their values. + :return: A new Pipeline instance with the specified attributes. + :raises ValueError: If the attributes do not match the pipeline's expected + structure. + """ new_pipeline = Pipeline() for name, value in attrs.items(): setattr(new_pipeline, name, value) @@ -105,44 +149,67 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Pipeline: class ConfidenceLevel(enum.Enum): + """Enum representing confidence levels for sampling.""" + LOW = "low" MEDIUM = "medium" HIGH = "high" class Domain(Resolvable, abc.ABC, Generic[T]): + """An abstract base class representing a domain in NePS spaces. + It defines the properties and methods that all domains must implement, + such as min and max values, sampling, and centered domains. + """ + @property @abc.abstractmethod def min_value(self) -> T: + """Get the minimum value of the domain.""" raise NotImplementedError() @property @abc.abstractmethod def max_value(self) -> T: + """Get the maximum value of the domain.""" raise NotImplementedError() @property @abc.abstractmethod def has_prior(self) -> bool: + """Check if the domain has a prior defined.""" raise NotImplementedError() @property @abc.abstractmethod def prior(self) -> T: + """Get the prior value of the domain. + Raises ValueError if the domain has no prior defined. + """ raise NotImplementedError() @property @abc.abstractmethod def prior_confidence(self) -> ConfidenceLevel: + """Get the confidence level of the prior. + Raises ValueError if the domain has no prior defined. + """ raise NotImplementedError() @property @abc.abstractmethod def range_compatibility_identifier(self) -> str: + """Get a string identifier for the range compatibility of the domain. + This identifier is used to check if two domains are compatible based on their + ranges. + """ raise NotImplementedError() @abc.abstractmethod def sample(self) -> T: + """Sample a value from the domain. + Returns a value of type T that is within the domain's range. 
+ """ raise NotImplementedError() @abc.abstractmethod @@ -151,24 +218,45 @@ def centered_around( center: T, confidence: ConfidenceLevel, ) -> Domain[T]: + """Create a new domain centered around a given value with a specified confidence + level. + :param center: The value around which to center the new domain. + :param confidence: The confidence level for the new domain. + :return: A new Domain instance that is centered around the specified value. + :raises ValueError: If the center value is not within the domain's range. + """ raise NotImplementedError() def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the domain as a mapping. + This method collects all attributes of the domain class and instance, + excluding private attributes and methods, and returns them as a dictionary. + :return: A mapping of attribute names to their values. + """ return {k.lstrip("_"): v for k, v in vars(self).items()} def from_attrs(self, attrs: Mapping[str, Any]) -> Domain[T]: + """Create a new Domain instance from the given attributes. + :param attrs: A mapping of attribute names to their values. + :return: A new Domain instance with the specified attributes. + :raises ValueError: If the attributes do not match the domain's expected + structure. + """ return type(self)(**attrs) def _calculate_new_domain_bounds( - number_type: Type[int] | Type[float], + number_type: type[int] | type[float], min_value: int | float, max_value: int | float, center: int | float, confidence: ConfidenceLevel, ) -> tuple[int, int] | tuple[float, float]: if center < min_value or center > max_value: - raise ValueError(f"Center value {center!r} must be within domain range [{min_value!r}, {max_value!r}]") + raise ValueError( + f"Center value {center!r} must be within domain range [{min_value!r}," + f" {max_value!r}]" + ) # Determine a chunk size by splitting the domain range into a fixed number of chunks. 
# Then use the confidence level to decide how many chunks to include @@ -178,8 +266,8 @@ def _calculate_new_domain_bounds( chunk_size = (max_value - min_value) / number_of_chunks # The numbers refer to how many segments to have on each side of the center. - # TODO: [lum] we need to make sure that in the end the range does not just have the center, - # but at least a little bit more around it too. + # TODO: [lum] we need to make sure that in the end the range does not just have the + # center, but at least a little bit more around it too. confidence_to_number_of_chunks_on_each_side = { ConfidenceLevel.HIGH: 1.0, ConfidenceLevel.MEDIUM: 2.5, @@ -203,12 +291,26 @@ def _calculate_new_domain_bounds( class Categorical(Domain[int], Generic[T]): + """A domain representing a categorical choice from a set of options. + It allows for sampling from a predefined set of choices and can be centered around + a specific choice with a given confidence level. + :param choices: A tuple of choices or a Domain of choices. + :param prior_index: The index of the prior choice in the choices tuple. + :param prior_confidence: The confidence level of the prior choice. + """ + def __init__( self, choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T], prior_index: int | Domain[int] | _Unset = _UNSET, prior_confidence: ConfidenceLevel | _Unset = _UNSET, ): + """Initialize the Categorical domain with choices and optional prior. + :param choices: A tuple of choices or a Domain of choices. + :param prior_index: The index of the prior choice in the choices tuple. + :param prior_confidence: The confidence level of the prior choice. + :raises ValueError: If the choices are empty or prior_index is out of bounds. + """ self._choices: tuple[T | Domain[T] | Resolvable | Any, ...] 
| Domain[T] if isinstance(choices, Sequence): self._choices = tuple(choice for choice in choices) @@ -219,37 +321,64 @@ def __init__( @property def min_value(self) -> int: + """Get the minimum value of the categorical domain. + :return: The minimum index of the choices, which is always 0. + """ return 0 @property def max_value(self) -> int: + """Get the maximum value of the categorical domain. + :return: The maximum index of the choices, which is the length of choices minus 1. + """ return max(len(cast(tuple, self._choices)) - 1, 0) @property def choices(self) -> tuple[T | Domain[T] | Resolvable, ...] | Domain[T]: + """Get the choices available in the categorical domain. + :return: A tuple of choices or a Domain of choices. + """ return self._choices @property def has_prior(self) -> bool: + """Check if the categorical domain has a prior defined. + :return: True if the prior index and confidence are set, False otherwise. + """ return self._prior_index is not _UNSET and self._prior_confidence is not _UNSET @property def prior(self) -> int: + """Get the prior index of the categorical domain. + :return: The index of the prior choice in the choices tuple. + :raises ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") return int(cast(int, self._prior_index)) @property def prior_confidence(self) -> ConfidenceLevel: + """Get the confidence level of the prior choice. + :return: The confidence level of the prior choice. + :raises ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") return cast(ConfidenceLevel, self._prior_confidence) @property def range_compatibility_identifier(self) -> str: + """Get a string identifier for the range compatibility of the categorical domain. + :return: A string representation of the number of choices in the domain. 
+ """ return f"{len(cast(tuple, self._choices))}" def sample(self) -> int: + """Sample a random index from the categorical choices. + :return: A randomly selected index from the choices tuple. + :raises ValueError: If the choices are empty. + """ return int(random.randint(0, len(cast(tuple[T], self._choices)) - 1)) def centered_around( @@ -257,6 +386,13 @@ def centered_around( center: int, confidence: ConfidenceLevel, ) -> Categorical: + """Create a new categorical domain centered around a specific choice index. + :param center: The index of the choice around which to center the new domain. + :param confidence: The confidence level for the new domain. + :return: A new Categorical instance with a range centered around the specified + choice index. + :raises ValueError: If the center index is out of bounds of the choices. + """ new_min, new_max = cast( tuple[int, int], _calculate_new_domain_bounds( @@ -276,14 +412,32 @@ def centered_around( class Float(Domain[float]): + """A domain representing a continuous range of floating-point values. + It allows for sampling from a range defined by minimum and maximum values, + and can be centered around a specific value with a given confidence level. + :param min_value: The minimum value of the domain. + :param max_value: The maximum value of the domain. + :param log: Whether to sample values on a logarithmic scale. + :param prior: The prior value for the domain, if any. + :param prior_confidence: The confidence level of the prior value. + """ + def __init__( self, min_value: float, max_value: float, - log: bool = False, + log: bool = False, # noqa: FBT001, FBT002 prior: float | _Unset = _UNSET, prior_confidence: ConfidenceLevel | _Unset = _UNSET, ): + """Initialize the Float domain with min and max values, and optional prior. + :param min_value: The minimum value of the domain. + :param max_value: The maximum value of the domain. + :param log: Whether to sample values on a logarithmic scale. 
+ :param prior: The prior value for the domain, if any. + :param prior_confidence: The confidence level of the prior value. + :raises ValueError: If min_value is greater than max_value. + """ self._min_value = min_value self._max_value = max_value self._log = log @@ -292,33 +446,61 @@ def __init__( @property def min_value(self) -> float: + """Get the minimum value of the floating-point domain. + :return: The minimum value of the domain. + :raises ValueError: If min_value is greater than max_value. + """ return self._min_value @property def max_value(self) -> float: + """Get the maximum value of the floating-point domain. + :return: The maximum value of the domain. + :raises ValueError: If min_value is greater than max_value. + """ return self._max_value @property def has_prior(self) -> bool: + """Check if the floating-point domain has a prior defined. + :return: True if the prior and prior confidence are set, False otherwise. + """ return self._prior is not _UNSET and self._prior_confidence is not _UNSET @property def prior(self) -> float: + """Get the prior value of the floating-point domain. + :return: The prior value of the domain. + :raises ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") return float(cast(float, self._prior)) @property def prior_confidence(self) -> ConfidenceLevel: + """Get the confidence level of the prior value. + :return: The confidence level of the prior value. + :raises ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") return cast(ConfidenceLevel, self._prior_confidence) @property def range_compatibility_identifier(self) -> str: + """Get a string identifier for the range compatibility of the floating-point + domain. + :return: A string representation of the minimum and maximum values, and whether + the domain is logarithmic. 
+ """ return f"{self._min_value}_{self._max_value}_{self._log}" def sample(self) -> float: + """Sample a random floating-point value from the domain. + :return: A randomly selected floating-point value within the domain's range. + :raises ValueError: If min_value is greater than max_value. + """ if self._log: log_min = math.log(self._min_value) log_max = math.log(self._max_value) @@ -330,6 +512,12 @@ def centered_around( center: float, confidence: ConfidenceLevel, ) -> Float: + """Create a new floating-point domain centered around a specific value. + :param center: The value around which to center the new domain. + :param confidence: The confidence level for the new domain. + :return: A new Float instance that is centered around the specified value. + :raises ValueError: If the center value is not within the domain's range. + """ new_min, new_max = _calculate_new_domain_bounds( number_type=float, min_value=self.min_value, @@ -347,14 +535,32 @@ def centered_around( class Integer(Domain[int]): + """A domain representing a range of integer values. + It allows for sampling from a range defined by minimum and maximum values, + and can be centered around a specific value with a given confidence level. + :param min_value: The minimum value of the domain. + :param max_value: The maximum value of the domain. + :param log: Whether to sample values on a logarithmic scale. + :param prior: The prior value for the domain, if any. + :param prior_confidence: The confidence level of the prior value. + """ + def __init__( self, min_value: int, max_value: int, - log: bool = False, + log: bool = False, # noqa: FBT001, FBT002 prior: int | _Unset = _UNSET, prior_confidence: ConfidenceLevel | _Unset = _UNSET, ): + """Initialize the Integer domain with min and max values, and optional prior. + :param min_value: The minimum value of the domain. + :param max_value: The maximum value of the domain. + :param log: Whether to sample values on a logarithmic scale. 
+ :param prior: The prior value for the domain, if any. + :param prior_confidence: The confidence level of the prior value. + :raises ValueError: If min_value is greater than max_value. + """ self._min_value = min_value self._max_value = max_value self._log = log @@ -363,33 +569,61 @@ def __init__( @property def min_value(self) -> int: + """Get the minimum value of the integer domain. + :return: The minimum value of the domain. + :raises ValueError: If min_value is greater than max_value. + """ return self._min_value @property def max_value(self) -> int: + """Get the maximum value of the integer domain. + :return: The maximum value of the domain. + :raises ValueError: If min_value is greater than max_value. + """ return self._max_value @property def has_prior(self) -> bool: + """Check if the integer domain has a prior defined. + :return: True if the prior and prior confidence are set, False otherwise. + """ return self._prior is not _UNSET and self._prior_confidence is not _UNSET @property def prior(self) -> int: + """Get the prior value of the integer domain. + :return: The prior value of the domain. + :raises ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") return int(cast(int, self._prior)) @property def prior_confidence(self) -> ConfidenceLevel: + """Get the confidence level of the prior value. + :return: The confidence level of the prior value. + :raises ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") return cast(ConfidenceLevel, self._prior_confidence) @property def range_compatibility_identifier(self) -> str: + """Get a string identifier for the range compatibility of the integer domain. + :return: A string representation of the minimum and maximum values, and whether + the domain is logarithmic. 
+ """ return f"{self._min_value}_{self._max_value}_{self._log}" def sample(self) -> int: + """Sample a random integer value from the domain. + :return: A randomly selected integer value within the domain's range. + :raises NotImplementedError: If the domain is set to sample on a logarithmic + scale, as this is not implemented yet. + """ if self._log: raise NotImplementedError("TODO.") return int(random.randint(self._min_value, self._max_value)) @@ -399,6 +633,12 @@ def centered_around( center: int, confidence: ConfidenceLevel, ) -> Integer: + """Create a new integer domain centered around a specific value. + :param center: The value around which to center the new domain. + :param confidence: The confidence level for the new domain. + :return: A new Integer instance that is centered around the specified value. + :raises ValueError: If the center value is not within the domain's range. + """ new_min, new_max = cast( tuple[int, int], _calculate_new_domain_bounds( @@ -419,17 +659,34 @@ def centered_around( class Operation(Resolvable): + """A class representing an operation in a NePS space. + It encapsulates an operator (a callable or a string), arguments, and keyword + arguments. + The operator can be a function or a string representing a function name. + :param operator: The operator to be used in the operation, can be a callable or a + string. + :param args: A sequence of arguments to be passed to the operator. + :param kwargs: A mapping of keyword arguments to be passed to the operator. + """ + def __init__( self, operator: Callable | str, args: Sequence[Any] | Resolvable | None = None, kwargs: Mapping[str, Any] | Resolvable | None = None, ): + """Initialize the Operation with an operator, arguments, and keyword arguments. + :param operator: The operator to be used in the operation, can be a callable or a + string. + :param args: A sequence of arguments to be passed to the operator. + :param kwargs: A mapping of keyword arguments to be passed to the operator. 
+ :raises ValueError: If the operator is not callable or a string. + """ self._operator = operator self._args: tuple[Any, ...] | Resolvable if not isinstance(args, Resolvable): - self._args = tuple(args) if args else tuple() + self._args = tuple(args) if args else () else: self._args = args @@ -441,33 +698,56 @@ def __init__( @property def operator(self) -> Callable | str: + """Get the operator of the operation. + :return: The operator, which can be a callable or a string. + :raises ValueError: If the operator is not callable or a string. + """ return self._operator @property def args(self) -> tuple[Any, ...]: + """Get the arguments of the operation. + :return: A tuple of arguments to be passed to the operator. + :raises ValueError: If the args are not a tuple or Resolvable. + """ return cast(tuple[Any, ...], self._args) @property def kwargs(self) -> Mapping[str, Any]: + """Get the keyword arguments of the operation. + :return: A mapping of keyword arguments to be passed to the operator. + :raises ValueError: If the kwargs are not a mapping or Resolvable. + """ return cast(Mapping[str, Any], self._kwargs) def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the operation as a mapping. + This method collects all attributes of the operation class and instance, + excluding private attributes and methods, and returns them as a dictionary. + :return: A mapping of attribute names to their values. + """ # TODO: [lum] simplify this. We know the fields. Maybe other places too. result: dict[str, Any] = {} for name, value in vars(self).items(): - name = name.lstrip("_") + stripped_name = name.lstrip("_") if isinstance(value, dict): for k, v in value.items(): # Multiple {{}} needed to escape surrounding '{' and '}'. 
- result[f"{name}{{{k}}}"] = v + result[f"{stripped_name}{{{k}}}"] = v elif isinstance(value, tuple): for i, v in enumerate(value): - result[f"{name}[{i}]"] = v + result[f"{stripped_name}[{i}]"] = v else: - result[name] = value + result[stripped_name] = value return result def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: + """Create a new Operation instance from the given attributes. + :param attrs: A mapping of attribute names to their values. + :return: A new Operation instance with the specified attributes. + :raises ValueError: If the attributes do not match the operation's expected + structure. + """ # TODO: [lum] simplify this. We know the fields. Maybe other places too. final_attrs: dict[str, Any] = {} for name, value in attrs.items(): @@ -485,50 +765,115 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: class Resampled(Resolvable): + """A class representing a resampling operation in a NePS space. + It can either be a resolvable object or a string representing a resampling by name. + :param source: The source of the resampling, can be a resolvable object or a string. + """ + def __init__(self, source: Resolvable | str): + """Initialize the Resampled object with a source. + :param source: The source of the resampling, which can be a resolvable object or + a string. + :raises ValueError: If the source is not a resolvable object or a string. + """ self._source = source @property def source(self) -> Resolvable | str: + """Get the source of the resampling. + :return: The source of the resampling, which can be a resolvable object or a + string. + """ return self._source @property def is_resampling_by_name(self) -> bool: + """Check if the resampling is by name. + :return: True if the source is a string, False otherwise. + """ return isinstance(self._source, str) def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the resampling source as a mapping. + :return: A mapping of attribute names to their values. 
+ :raises ValueError: If the resampling is by name or the source is not resolvable. + """ if self.is_resampling_by_name: - raise ValueError(f"This is a resampling by name, can't get attrs from it: {self.source!r}.") + raise ValueError( + f"This is a resampling by name, can't get attrs from it: {self.source!r}." + ) if not isinstance(self._source, Resolvable): - raise ValueError(f"Source should be a resolvable object. Is: {self._source!r}.") + raise ValueError( + f"Source should be a resolvable object. Is: {self._source!r}." + ) return self._source.get_attrs() def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: + """Create a new resolvable object from the given attributes. + :param attrs: A mapping of attribute names to their values. + :return: A new resolvable object created from the specified attributes. + :raises ValueError: If the resampling is by name or the source is not resolvable. + """ if self.is_resampling_by_name: - raise ValueError(f"This is a resampling by name, can't create object for it: {self.source!r}.") + raise ValueError( + "This is a resampling by name, can't create object for it:" + f" {self.source!r}." + ) if not isinstance(self._source, Resolvable): - raise ValueError(f"Source should be a resolvable object. Is: {self._source!r}.") + raise ValueError( + f"Source should be a resolvable object. Is: {self._source!r}." + ) return self._source.from_attrs(attrs) class Fidelity(Resolvable, Generic[T]): + """A class representing a fidelity in a NePS space. + It encapsulates a domain that defines the range of values for the fidelity. + :param domain: The domain of the fidelity, which can be an Integer or Float domain. + :raises ValueError: If the domain has a prior defined, as fidelity domains should not + have priors. + """ + def __init__(self, domain: Integer | Float): + """Initialize the Fidelity with a domain. + :param domain: The domain of the fidelity, which can be an Integer or Float + domain. 
+ :raises ValueError: If the domain has a prior defined, as fidelity domains should + not have priors. + """ if domain.has_prior: raise ValueError(f"The domain of a Fidelity can not have priors: {domain!r}.") self._domain = domain @property def min_value(self) -> int | float: + """Get the minimum value of the fidelity domain. + :return: The minimum value of the fidelity domain. + """ return self._domain.min_value @property def max_value(self) -> int | float: + """Get the maximum value of the fidelity domain. + :return: The maximum value of the fidelity domain. + """ return self._domain.max_value def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the fidelity as a mapping. + This method collects all attributes of the fidelity class and instance, + excluding private attributes and methods, and returns them as a dictionary. + :return: A mapping of attribute names to their values. + :raises ValueError: If the fidelity has no domain defined. + """ raise ValueError("For a Fidelity object there is nothing to resolve.") - def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: + def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: # noqa: ARG002 + """Create a new Fidelity instance from the given attributes. + :param attrs: A mapping of attribute names to their values. + :return: A new Fidelity instance with the specified attributes. + :raises ValueError: If the fidelity has no domain defined. + """ raise ValueError("For a Fidelity object there is nothing to resolve.") @@ -536,28 +881,55 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: class OnlyPredefinedValuesSampler(DomainSampler): + """A sampler that only returns predefined values for a given path. + If the path is not found in the predefined values, it raises a ValueError. + :param predefined_samplings: A mapping of paths to predefined values. + """ + def __init__( self, predefined_samplings: Mapping[str, Any], ): + """Initialize the sampler with predefined samplings. 
+ :param predefined_samplings: A mapping of paths to predefined values. + :raises ValueError: If predefined_samplings is empty. + """ self._predefined_samplings = predefined_samplings def __call__( self, *, - domain_obj: Domain[T], + domain_obj: Domain[T], # noqa: ARG002 current_path: str, ) -> T: + """Sample a value from the predefined samplings for the given path. + :param domain_obj: The domain object, not used in this sampler. + :param current_path: The path for which to sample a value. + :return: The predefined value for the given path. + :raises ValueError: If the current path is not in the predefined samplings. + """ if current_path not in self._predefined_samplings: raise ValueError(f"No predefined value for path: {current_path!r}.") return cast(T, self._predefined_samplings[current_path]) class RandomSampler(DomainSampler): + """A sampler that randomly samples from a predefined set of values. + If the current path is not in the predefined values, it samples from the domain. + :param predefined_samplings: A mapping of paths to predefined values. + This sampler will use these values if available, otherwise it will sample from the + domain. + """ + def __init__( self, predefined_samplings: Mapping[str, Any], ): + """Initialize the sampler with predefined samplings. + :param predefined_samplings: A mapping of paths to predefined values. + :raises + ValueError: If predefined_samplings is empty. + """ self._predefined_samplings = predefined_samplings def __call__( @@ -566,6 +938,14 @@ def __call__( domain_obj: Domain[T], current_path: str, ) -> T: + """Sample a value from the predefined samplings or the domain. + :param domain_obj: The domain object from which to sample. + :param current_path: The path for which to sample a value. + :return: A sampled value, either from the predefined samplings or from the + domain. + :raises ValueError: If the current path is not in the predefined samplings and + the domain does not have a prior defined. 
+ """ if current_path not in self._predefined_samplings: sampled_value = domain_obj.sample() else: @@ -574,13 +954,34 @@ def __call__( class PriorOrFallbackSampler(DomainSampler): + """A sampler that uses a prior value if available, otherwise falls back to another + sampler. + :param fallback_sampler: A DomainSampler to use if the prior is not available. + :param prior_use_probability: The probability of using the prior value when + available. + This should be a float between 0 and 1, where 0 means never use the prior and 1 means + always use it. + :raises ValueError: If the prior_use_probability is not between 0 and 1. + """ + def __init__( self, fallback_sampler: DomainSampler, prior_use_probability: float, ): + """Initialize the sampler with a fallback sampler and a prior use probability. + :param fallback_sampler: A DomainSampler to use if the prior is not available. + :param prior_use_probability: The probability of using the prior value when + available. + This should be a float between 0 and 1, where 0 means never use the prior and 1 + means always use it. + :raises ValueError: If the prior_use_probability is not between 0 and 1. + """ if not 0 <= prior_use_probability <= 1: - raise ValueError(f"The given `prior_use_probability` value is out of range: {prior_use_probability!r}.") + raise ValueError( + "The given `prior_use_probability` value is out of range:" + f" {prior_use_probability!r}." + ) self._fallback_sampler = fallback_sampler self._prior_use_probability = prior_use_probability @@ -591,6 +992,14 @@ def __call__( domain_obj: Domain[T], current_path: str, ) -> T: + """Sample a value from the domain, using the prior if available and according to + the prior use probability. + :param domain_obj: The domain object from which to sample. + :param current_path: The path for which to sample a value. + :return: A sampled value, either from the prior or from the fallback sampler. 
+ :raises ValueError: If the domain does not have a prior defined and the fallback + sampler is not provided. + """ use_prior = random.choices( (True, False), weights=(self._prior_use_probability, 1 - self._prior_use_probability), @@ -605,12 +1014,35 @@ def __call__( class MutateByForgettingSampler(DomainSampler): + """A sampler that mutates predefined samplings by forgetting a certain number of + them. It randomly selects a number of predefined samplings to forget and returns a + new sampler that only uses the remaining samplings. + :param predefined_samplings: A mapping of paths to predefined values. + :param n_forgets: The number of predefined samplings to forget. + This should be an integer greater than 0 and less than or equal to the number of + predefined samplings. + :raises ValueError: If n_forgets is not a valid integer or if it exceeds the number + of predefined samplings. + """ + def __init__( self, predefined_samplings: Mapping[str, Any], n_forgets: int, ): - if not isinstance(n_forgets, int) or n_forgets <= 0 or n_forgets > len(predefined_samplings): + """Initialize the sampler with predefined samplings and a number of forgets. + :param predefined_samplings: A mapping of paths to predefined values. + :param n_forgets: The number of predefined samplings to forget. + This should be an integer greater than 0 and less than or equal to the number of + predefined samplings. + :raises ValueError: If n_forgets is not a valid integer or if it exceeds the + number of predefined samplings. + """ + if ( + not isinstance(n_forgets, int) + or n_forgets <= 0 + or n_forgets > len(predefined_samplings) + ): raise ValueError(f"Invalid value for `n_forgets`: {n_forgets!r}.") mutated_samplings_to_make = _mutate_samplings_to_make_by_forgetting( @@ -628,16 +1060,46 @@ def __call__( domain_obj: Domain[T], current_path: str, ) -> T: + """Sample a value from the mutated predefined samplings or the domain. + :param domain_obj: The domain object from which to sample. 
+ :param current_path: The path for which to sample a value. + :return: A sampled value, either from the mutated predefined samplings or from + the domain. + :raises ValueError: If the current path is not in the mutated predefined + samplings and the domain does not have a prior defined. + """ return self._random_sampler(domain_obj=domain_obj, current_path=current_path) class MutatateUsingCentersSampler(DomainSampler): + """A sampler that mutates predefined samplings by forgetting a certain number of them, + but still uses the original values as centers for sampling. + :param predefined_samplings: A mapping of paths to predefined values. + :param n_mutations: The number of predefined samplings to mutate. + This should be an integer greater than 0 and less than or equal to the number of + predefined samplings. + :raises ValueError: If n_mutations is not a valid integer or if it exceeds the number + of predefined samplings. + """ + def __init__( self, predefined_samplings: Mapping[str, Any], n_mutations: int, ): - if not isinstance(n_mutations, int) or n_mutations <= 0 or n_mutations > len(predefined_samplings): + """Initialize the sampler with predefined samplings and a number of mutations. + :param predefined_samplings: A mapping of paths to predefined values. + :param n_mutations: The number of predefined samplings to mutate. + This should be an integer greater than 0 and less than or equal to the number of + predefined samplings. + :raises ValueError: If n_mutations is not a valid integer or if it exceeds + the number of predefined samplings. 
+ """ + if ( + not isinstance(n_mutations, int) + or n_mutations <= 0 + or n_mutations > len(predefined_samplings) + ): raise ValueError(f"Invalid value for `n_mutations`: {n_mutations!r}.") self._kept_samplings_to_make = _mutate_samplings_to_make_by_forgetting( @@ -654,6 +1116,15 @@ def __call__( domain_obj: Domain[T], current_path: str, ) -> T: + """Sample a value from the predefined samplings or the domain, using original + values as centers if the current path is not in the kept samplings. + :param domain_obj: The domain object from which to sample. + :param current_path: The path for which to sample a value. + :return: A sampled value, either from the kept samplings or from the domain, + using the original values as centers if necessary. + :raises ValueError: If the current path is not in the kept samplings and the + domain does not have a prior defined. + """ if current_path not in self._kept_samplings_to_make: # For this path we either have forgotten the value or we never had it. if current_path in self._original_samplings_to_make: @@ -674,18 +1145,48 @@ def __call__( class CrossoverNotPossibleError(Exception): - pass + """Exception raised when a crossover operation is not possible.""" class CrossoverByMixingSampler(DomainSampler): + """A sampler that performs a crossover operation by mixing two sets of predefined + samplings. It combines the predefined samplings from two sources, allowing for a + probability-based + selection of values from either source. + :param predefined_samplings_1: The first set of predefined samplings. + :param predefined_samplings_2: The second set of predefined samplings. + :param prefer_first_probability: The probability of preferring values from the first + set over the second set when both have values for the same path. + This should be a float between 0 and 1, where 0 means always prefer the second set + and 1 means always prefer the first set. + :raises ValueError: If prefer_first_probability is not between 0 and 1. 
+ :raises CrossoverNotPossibleError: If no crossovers were made between the two sets + of predefined samplings. + """ + def __init__( self, predefined_samplings_1: Mapping[str, Any], predefined_samplings_2: Mapping[str, Any], prefer_first_probability: float, ): - if not isinstance(prefer_first_probability, float) or not (0 <= prefer_first_probability <= 1): - raise ValueError(f"Invalid value for `prefer_first_probability`: {prefer_first_probability!r}.") + """Initialize the sampler with two sets of predefined samplings and a preference + probability for the first set. + :param predefined_samplings_1: The first set of predefined samplings. + :param predefined_samplings_2: The second set of predefined samplings. + :param prefer_first_probability: The probability of preferring values from the + first set over the second set when both have values for the same path. + This should be a float between 0 and 1, where 0 means always prefer the second + set and 1 means always prefer the first set. + :raises ValueError: If prefer_first_probability is not between 0 and 1. + """ + if not isinstance(prefer_first_probability, float) or not ( + 0 <= prefer_first_probability <= 1 + ): + raise ValueError( + "Invalid value for `prefer_first_probability`:" + f" {prefer_first_probability!r}." + ) ( made_any_crossovers, @@ -709,6 +1210,14 @@ def __call__( domain_obj: Domain[T], current_path: str, ) -> T: + """Sample a value from the crossed-over predefined samplings or the domain. + :param domain_obj: The domain object from which to sample. + :param current_path: The path for which to sample a value. + :return: A sampled value, either from the crossed-over predefined samplings or + from the domain. + :raises ValueError: If the current path is not in the crossed-over predefined + samplings and the domain does not have a prior defined. 
+ """ return self._random_sampler(domain_obj=domain_obj, current_path=current_path) @@ -757,6 +1266,19 @@ def _crossover_samplings_to_make_by_mixing( class SamplingResolutionContext: + """A context for resolving samplings in a NePS space. + It manages the resolution root, domain sampler, environment values, + and keeps track of samplings made and resolved objects. + :param resolution_root: The root of the resolution, which should be a Resolvable + object. + :param domain_sampler: The DomainSampler to use for sampling from Domain objects. + :param environment_values: A mapping of environment values that are fixed and not + related + to samplings. These values can be used in the resolution process. + :raises ValueError: If the resolution_root is not a Resolvable, or if the + domain_sampler is not a DomainSampler, or if the environment_values is not a Mapping. + """ + def __init__( self, *, @@ -764,19 +1286,40 @@ def __init__( domain_sampler: DomainSampler, environment_values: Mapping[str, Any], ): + """Initialize the SamplingResolutionContext with a resolution root, domain + sampler, and environment values. + :param resolution_root: The root of the resolution, which should be a Resolvable + object. + :param domain_sampler: The DomainSampler to use for sampling from Domain objects. + :param environment_values: A mapping of environment values that are fixed and not + related to samplings. These values can be used in the resolution process. + :raises ValueError: If the resolution_root is not a Resolvable, or if the + domain_sampler is not a DomainSampler, or if the environment_values is not a + Mapping. + """ if not isinstance(resolution_root, Resolvable): - raise ValueError(f"The received `resolution_root` is not a Resolvable: {resolution_root!r}.") + raise ValueError( + "The received `resolution_root` is not a Resolvable:" + f" {resolution_root!r}." 
+ ) if not isinstance(domain_sampler, DomainSampler): - raise ValueError(f"The received `domain_sampler` is not a DomainSampler: {domain_sampler!r}.") + raise ValueError( + "The received `domain_sampler` is not a DomainSampler:" + f" {domain_sampler!r}." + ) if not isinstance(environment_values, Mapping): - raise ValueError(f"The received `environment_values` is not a Mapping: {environment_values!r}.") + raise ValueError( + "The received `environment_values` is not a Mapping:" + f" {environment_values!r}." + ) # `_resolution_root` stores the root of the resolution. self._resolution_root: Resolvable = resolution_root - # `_domain_sampler` stores the object responsible for sampling from Domain objects. + # `_domain_sampler` stores the object responsible for sampling from Domain + # objects. self._domain_sampler = domain_sampler # # `_environment_values` stores fixed values from outside. @@ -795,20 +1338,36 @@ def __init__( @property def resolution_root(self) -> Resolvable: + """Get the root of the resolution. + :return: The root of the resolution, which should be a Resolvable object. + """ return self._resolution_root @property def samplings_made(self) -> Mapping[str, Any]: + """Get the samplings made during the resolution process. + :return: A mapping of paths to sampled values. + """ return self._samplings_made @property def environment_values(self) -> Mapping[str, Any]: + """Get the environment values that are fixed and not related to samplings. + :return: A mapping of environment variable names to their values. + """ return self._environment_values @contextlib.contextmanager def resolving(self, _obj: Any, name: str) -> Generator[None]: + """Context manager for resolving an object in the current resolution context. + :param _obj: The object being resolved, can be any type. + :param name: The name of the object being resolved, used for debugging. + :raises ValueError: If the name is not a valid string. 
+ """ if not name or not isinstance(name, str): - raise ValueError(f"Given name for what we are resolving is invalid: {name!r}.") + raise ValueError( + f"Given name for what we are resolving is invalid: {name!r}." + ) # It is possible that the received object has already been resolved. # That is expected and is okay, so no check is made for it. @@ -821,27 +1380,52 @@ def resolving(self, _obj: Any, name: str) -> Generator[None]: self._current_path_parts.pop() def was_already_resolved(self, obj: Any) -> bool: + """Check if the given object was already resolved in the current context. + :param obj: The object to check if it was already resolved. + :return: True if the object was already resolved, False otherwise. + """ return obj in self._resolved_objects def add_resolved(self, original: Any, resolved: Any) -> None: + """Add a resolved object to the context. + :param original: The original object that was resolved. + :param resolved: The resolved value of the original object. + :raises ValueError: If the original object was already resolved or if it is a + Resampled. + """ if self.was_already_resolved(original): raise ValueError( f"Original object has already been resolved: {original!r}. " + "\nIf you are doing resampling by name, " - + "make sure you are not forgetting to request resampling also for related objects." - + "\nOtherwise it could lead to infinite recursion." + + "make sure you are not forgetting to request resampling also for" + " related objects." + "\nOtherwise it could lead to infinite recursion." ) if isinstance(original, Resampled): - raise ValueError(f"Attempting to add a Resampled object to resolved values: {original!r}.") + raise ValueError( + f"Attempting to add a Resampled object to resolved values: {original!r}." + ) self._resolved_objects[original] = resolved def get_resolved(self, obj: Any) -> Any: + """Get the resolved value for the given object. + :param obj: The object for which to get the resolved value. 
+ :return: The resolved value of the object. + :raises ValueError: If the object was not already resolved in the context. + """ try: return self._resolved_objects[obj] - except KeyError: - raise ValueError(f"Given object was not already resolved. Please check first: {obj!r}") + except KeyError as err: + raise ValueError( + f"Given object was not already resolved. Please check first: {obj!r}" + ) from err def sample_from(self, domain_obj: Domain) -> Any: + """Sample a value from the given domain object. + :param domain_obj: The domain object from which to sample a value. + :return: The sampled value from the domain object. + :raises ValueError: If the domain object was already resolved or if the path + has already been sampled from. + """ # Each `domain_obj` is only ever sampled from once. # This is okay and the expected behavior. # For each `domain_obj`, its sampled value is either directly stored itself, @@ -850,15 +1434,17 @@ def sample_from(self, domain_obj: Domain) -> Any: # and so the `domain_obj` will not be re-sampled from again. if self.was_already_resolved(domain_obj): raise ValueError( - f"We have already sampled a value for the given domain object: {domain_obj!r}." - + "\nThis should not be happening." + "We have already sampled a value for the given domain object:" + f" {domain_obj!r}." + "\nThis should not be happening." ) # The range compatibility identifier is there to make sure when we say # the path matches, that the range for the value we are looking up also matches. 
domain_obj_type_name = type(domain_obj).__name__.lower() range_compatibility_identifier = domain_obj.range_compatibility_identifier - domain_obj_identifier = f"{domain_obj_type_name}__{range_compatibility_identifier}" + domain_obj_identifier = ( + f"{domain_obj_type_name}__{range_compatibility_identifier}" + ) current_path = ".".join(self._current_path_parts) current_path += "::" + domain_obj_identifier @@ -880,19 +1466,47 @@ def sample_from(self, domain_obj: Domain) -> Any: return self._samplings_made[current_path] def get_value_from_environment(self, var_name: str) -> Any: + """Get a value from the environment variables. + :param var_name: The name of the environment variable to get the value from. + :return: The value of the environment variable. + :raises ValueError: If the environment variable is not found in the context. + """ try: return self._environment_values[var_name] - except KeyError: - raise ValueError(f"No value is available for the environment variable {var_name!r}.") + except KeyError as err: + raise ValueError( + f"No value is available for the environment variable {var_name!r}." + ) from err class SamplingResolver: + """A class responsible for resolving samplings in a NePS space. + It uses a SamplingResolutionContext to manage the resolution process, + and a DomainSampler to sample values from Domain objects. + :param resolver: The resolver to use for resolving objects. + This should be a callable that takes an object and a context and returns the resolved + object. + :raises ValueError: If the resolver is not a callable or if it is not a + DomainSampler or a SamplingResolutionContext. + """ + def __call__( self, obj: Resolvable, domain_sampler: DomainSampler, environment_values: Mapping[str, Any], ) -> tuple[Resolvable, SamplingResolutionContext]: + """Resolve the given object in the context of the provided domain sampler and + environment values. + :param obj: The Resolvable object to resolve. 
+ :param domain_sampler: The DomainSampler to use for sampling from Domain objects. + :param environment_values: A mapping of environment values that are fixed and not + related to samplings. + :return: A tuple containing the resolved object and the + SamplingResolutionContext. + :raises ValueError: If the object is not a Resolvable, or if the domain_sampler + is not a DomainSampler, or if the environment_values is not a Mapping. + """ context = SamplingResolutionContext( resolution_root=obj, domain_sampler=domain_sampler, @@ -914,7 +1528,8 @@ def _resolver_dispatch( # No need to store or lookup from context, directly return the given object. if isinstance(any_obj, Resolvable): raise ValueError( - f"The default resolver is not supposed to be called for resolvable objects. Received: {any_obj!r}." + "The default resolver is not supposed to be called for resolvable" + f" objects. Received: {any_obj!r}." ) return any_obj @@ -934,7 +1549,9 @@ def _( for attr_name, initial_attr_value in initial_attrs.items(): resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) final_attrs[attr_name] = resolved_attr_value - needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) + needed_resolving = needed_resolving or ( + initial_attr_value is not resolved_attr_value + ) result = pipeline_obj if needed_resolving: @@ -959,7 +1576,9 @@ def _( for attr_name, initial_attr_value in initial_attrs.items(): resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) final_attrs[attr_name] = resolved_attr_value - needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) + needed_resolving = needed_resolving or ( + initial_attr_value is not resolved_attr_value + ) resolved_domain_obj = domain_obj if needed_resolving: @@ -1004,22 +1623,32 @@ def _( # If we add a `_resolve_tuple` functionality to go into tuples # and resolve their contents, the call below will likely # lead to too much work being done 
or issues. - resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) + resolved_attr_value = self._resolve( + initial_attr_value, attr_name, context + ) else: resolved_attr_value = initial_attr_value else: - resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) + resolved_attr_value = self._resolve( + initial_attr_value, attr_name, context + ) final_attrs[attr_name] = resolved_attr_value - needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) + needed_resolving = needed_resolving or ( + initial_attr_value is not resolved_attr_value + ) resolved_categorical_obj = categorical_obj if needed_resolving: - resolved_categorical_obj = cast(Categorical, categorical_obj.from_attrs(final_attrs)) + resolved_categorical_obj = cast( + Categorical, categorical_obj.from_attrs(final_attrs) + ) try: sampled_index = context.sample_from(resolved_categorical_obj) except Exception as e: - raise ValueError(f"Failed to sample from {resolved_categorical_obj!r}.") from e + raise ValueError( + f"Failed to sample from {resolved_categorical_obj!r}." + ) from e sampled_value = cast(tuple, resolved_categorical_obj.choices)[sampled_index] result = self._resolve(sampled_value, "sampled_value", context) @@ -1035,9 +1664,9 @@ def _( if context.was_already_resolved(operation_obj): return context.get_resolved(operation_obj) - # It is possible that the `operation_obj` will require two runs to be fully resolved. - # For example if `operation_obj.args` is not defined as a tuple of args, - # but is a Resolvable that needs to be resolved first itself, + # It is possible that the `operation_obj` will require two runs to be fully + # resolved. For example if `operation_obj.args` is not defined as a tuple of + # args, but is a Resolvable that needs to be resolved first itself, # for us to have the actual tuple of args. # First run. 
@@ -1049,7 +1678,9 @@ def _( for attr_name, initial_attr_value in initial_attrs.items(): resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) final_attrs[attr_name] = resolved_attr_value - needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) + needed_resolving = needed_resolving or ( + initial_attr_value is not resolved_attr_value + ) operation_obj_first_run = operation_obj if needed_resolving: @@ -1066,7 +1697,9 @@ def _( for attr_name, initial_attr_value in initial_attrs.items(): resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) final_attrs[attr_name] = resolved_attr_value - needed_resolving = needed_resolving or (initial_attr_value is not resolved_attr_value) + needed_resolving = needed_resolving or ( + initial_attr_value is not resolved_attr_value + ) operation_obj_second_run = operation_obj_first_run if needed_resolving: @@ -1101,9 +1734,9 @@ def _( resolvable_to_resample_obj = resampled_obj.from_attrs(initial_attrs) type_name = type(resolvable_to_resample_obj).__name__.lower() - result = self._resolve(resolvable_to_resample_obj, f"resampled_{type_name}", context) - - return result + return self._resolve( + resolvable_to_resample_obj, f"resampled_{type_name}", context + ) @_resolver_dispatch.register def _( @@ -1123,27 +1756,35 @@ def _( # environment value, which we look up by the attribute name of the # received fidelity object inside the resolution root. 
- names_for_this_fidelity_obj = list( + names_for_this_fidelity_obj = [ attr_name for attr_name, attr_value in context.resolution_root.get_attrs().items() if attr_value is fidelity_obj - ) + ] if len(names_for_this_fidelity_obj) == 0: - raise ValueError("A fidelity object should be a direct attribute of the pipeline.") - elif len(names_for_this_fidelity_obj) > 1: - raise ValueError("A fidelity object should only be referenced once in the pipeline.") + raise ValueError( + "A fidelity object should be a direct attribute of the pipeline." + ) + if len(names_for_this_fidelity_obj) > 1: + raise ValueError( + "A fidelity object should only be referenced once in the pipeline." + ) fidelity_name = names_for_this_fidelity_obj[0] try: result = context.get_value_from_environment(fidelity_name) - except ValueError: - raise ValueError(f"No value is available in the environment for fidelity {fidelity_name!r}.") + except ValueError as err: + raise ValueError( + "No value is available in the environment for fidelity" + f" {fidelity_name!r}." + ) from err if not fidelity_obj.min_value <= result <= fidelity_obj.max_value: raise ValueError( - f"Value for fidelity with name {fidelity_name!r} is outside its allowed range " + f"Value for fidelity with name {fidelity_name!r} is outside its allowed" + " range " + f"[{fidelity_obj.min_value!r}, {fidelity_obj.max_value!r}]. " + f"Received: {result!r}." ) @@ -1155,11 +1796,14 @@ def _( def _( self, resolvable_obj: Resolvable, - context: SamplingResolutionContext, + context: SamplingResolutionContext, # noqa: ARG002 ) -> Any: - # Called when no specialized resolver was available for the specific resolvable type. - # That is not something that is normally expected. - raise ValueError(f"No specialized resolver was registered for object of type {type(resolvable_obj)!r}.") + # Called when no specialized resolver was available for the specific resolvable + # type. That is not something that is normally expected. 
+ raise ValueError( + "No specialized resolver was registered for object of type" + f" {type(resolvable_obj)!r}." + ) def resolve( @@ -1167,6 +1811,16 @@ def resolve( domain_sampler: DomainSampler | None = None, environment_values: Mapping[str, Any] | None = None, ) -> tuple[P, SamplingResolutionContext]: + """Resolve a NePS pipeline with the given domain sampler and environment values. + :param pipeline: The pipeline to resolve, which should be a Pipeline object. + :param domain_sampler: The DomainSampler to use for sampling from Domain objects. + If None, a RandomSampler with no predefined values will be used. + :param environment_values: A mapping of environment variable names to their values. + If None, an empty mapping will be used. + :return: A tuple containing the resolved pipeline and the SamplingResolutionContext. + :raises ValueError: If the pipeline is not a Pipeline object or if the domain_sampler + is not a DomainSampler or if the environment_values is not a Mapping. + """ if domain_sampler is None: # By default, use a random sampler with no predefined values. domain_sampler = RandomSampler(predefined_samplings={}) @@ -1188,19 +1842,26 @@ def resolve( def convert_operation_to_callable(operation: Operation) -> Callable: + """Convert an Operation to a callable that can be executed. + :param operation: The Operation to convert. + :return: A callable that represents the operation. + :raises ValueError: If the operation is not a valid Operation object. 
+ """ operator = cast(Callable, operation.operator) operation_args = [] for arg in operation.args: - if isinstance(arg, Operation): - arg = convert_operation_to_callable(arg) - operation_args.append(arg) + operation_args.append( + convert_operation_to_callable(arg) if isinstance(arg, Operation) else arg + ) operation_kwargs = {} for kwarg_name, kwarg_value in operation.kwargs.items(): - if isinstance(kwarg_value, Operation): - kwarg_value = convert_operation_to_callable(kwarg_value) - operation_kwargs[kwarg_name] = kwarg_value + operation_kwargs[kwarg_name] = ( + convert_operation_to_callable(kwarg_value) + if isinstance(kwarg_value, Operation) + else kwarg_value + ) return cast(Callable, operator(*operation_args, **operation_kwargs)) @@ -1237,15 +1898,30 @@ def _operation_to_unwrapped_config( def convert_operation_to_string(operation: Operation) -> str: + """Convert an Operation to a string representation. + :param operation: The Operation to convert. + :return: A string representation of the operation. + :raises ValueError: If the operation is not a valid Operation object. + """ unwrapped_config = tuple(_operation_to_unwrapped_config(operation)) - return cast(str, config_string.wrap_config_into_string(unwrapped_config)) + return config_string.wrap_config_into_string(unwrapped_config) # ------------------------------------------------- class RandomSearch: + """A simple random search optimizer for a NePS pipeline. + It samples configurations randomly from the pipeline's domain and environment values. + :param pipeline: The pipeline to optimize, which should be a Pipeline object. + :raises ValueError: If the pipeline is not a Pipeline object. + """ + def __init__(self, pipeline: Pipeline): + """Initialize the RandomSearch optimizer with a pipeline. + :param pipeline: The pipeline to optimize, which should be a Pipeline object. + :raises ValueError: If the pipeline is not a Pipeline object. 
+ """ self._pipeline = pipeline self._environment_values = {} @@ -1258,9 +1934,21 @@ def __init__(self, pipeline: Pipeline): def __call__( self, trials: Mapping[str, trial_state.Trial], - budget_info: optimizer_state.BudgetInfo | None, + budget_info: optimizer_state.BudgetInfo | None, # noqa: ARG002 n: int | None = None, ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: + """Sample configurations randomly from the pipeline's domain and environment + values. + :param trials: A mapping of trial IDs to Trial objects, representing previous + trials. + :param budget_info: The budget information for the optimization process. + :param n: The number of configurations to sample. If None, a single configuration + will be sampled. + :return: A SampledConfig object or a list of SampledConfig objects, depending + on the value of n. + :raises ValueError: If the pipeline is not a Pipeline object or if the trials are + not a valid mapping of trial IDs to Trial objects. + """ n_prev_trials = len(trials) n_requested = 1 if n is None else n return_single = n is None @@ -1278,7 +1966,18 @@ def __call__( class ComplexRandomSearch: + """A complex random search optimizer for a NePS pipeline. + It samples configurations randomly from the pipeline's domain and environment values, + and also performs mutations and crossovers based on previous successful trials. + :param pipeline: The pipeline to optimize, which should be a Pipeline object. + :raises ValueError: If the pipeline is not a Pipeline object. + """ + def __init__(self, pipeline: Pipeline): + """Initialize the ComplexRandomSearch optimizer with a pipeline. + :param pipeline: The pipeline to optimize, which should be a Pipeline object. + :raises ValueError: If the pipeline is not a Pipeline object. 
+ """ self._pipeline = pipeline self._environment_values = {} @@ -1301,9 +2000,22 @@ def __init__(self, pipeline: Pipeline): def __call__( self, trials: Mapping[str, trial_state.Trial], - budget_info: optimizer_state.BudgetInfo | None, + budget_info: optimizer_state.BudgetInfo | None, # noqa: ARG002 n: int | None = None, ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: + """Sample configurations randomly from the pipeline's domain and environment + values, and also perform mutations and crossovers based on previous successful + trials. + :param trials: A mapping of trial IDs to Trial objects, representing previous + trials. + :param budget_info: The budget information for the optimization process. + :param n: The number of configurations to sample. If None, a single configuration + will be sampled. + :return: A SampledConfig object or a list of SampledConfig objects, depending + on the value of n. + :raises ValueError: If the pipeline is not a Pipeline object or if the trials are + not a valid mapping of trial IDs to Trial objects. + """ n_prev_trials = len(trials) n_requested = 1 if n is None else n return_single = n is None @@ -1328,9 +2040,11 @@ def __call__( mutated_incumbents = [] crossed_over_incumbents = [] - successful_trials = list( + successful_trials: list[Trial] = list( filter( - lambda trial: trial.report.reported_as == trial.State.SUCCESS, + lambda trial: trial.report.reported_as == trial.State.SUCCESS + if trial.report is not None + else False, trials.values(), ) ) @@ -1339,7 +2053,9 @@ def __call__( top_trials = heapq.nsmallest( n_top_trials, successful_trials, - key=lambda trial: trial.report.objective_to_minimize, + key=lambda trial: float(trial.report.objective_to_minimize) + if trial.report and isinstance(trial.report.objective_to_minimize, float) + else float("inf"), ) # Will have up to `n_top_trials` items. # Do some mutations. 
@@ -1363,7 +2079,9 @@ def __call__( pipeline=self._pipeline, domain_sampler=MutatateUsingCentersSampler( predefined_samplings=top_trial_config, - n_mutations=max(1, random.randint(1, int(len(top_trial_config) / 2))), + n_mutations=max( + 1, random.randint(1, int(len(top_trial_config) / 2)) + ), ), environment_values=self._environment_values, ) @@ -1387,7 +2105,9 @@ def __call__( pipeline=self._pipeline, domain_sampler=MutateByForgettingSampler( predefined_samplings=top_trial_config, - n_forgets=max(1, random.randint(1, int(len(top_trial_config) / 2))), + n_forgets=max( + 1, random.randint(1, int(len(top_trial_config) / 2)) + ), ), environment_values=self._environment_values, ) @@ -1462,6 +2182,13 @@ def __call__( class NepsCompatConverter: + """A class to convert between NePS configurations and NEPS-compatible configurations. + It provides methods to convert a SamplingResolutionContext to a NEPS-compatible config + and to convert a NEPS-compatible config back to a SamplingResolutionContext. + :param resolution_context: The SamplingResolutionContext to convert. + :raises ValueError: If the resolution_context is not a SamplingResolutionContext. + """ + _SAMPLING_PREFIX = "SAMPLING__" _ENVIRONMENT_PREFIX = "ENVIRONMENT__" _SAMPLING_PREFIX_LEN = len(_SAMPLING_PREFIX) @@ -1478,6 +2205,11 @@ def to_neps_config( cls, resolution_context: SamplingResolutionContext, ) -> Mapping[str, Any]: + """Convert a SamplingResolutionContext to a NEPS-compatible config. + :param resolution_context: The SamplingResolutionContext to convert. + :return: A mapping of NEPS-compatible configuration keys to their values. + :raises ValueError: If the resolution_context is not a SamplingResolutionContext. + """ config: dict[str, Any] = {} samplings_made = resolution_context.samplings_made @@ -1495,6 +2227,12 @@ def from_neps_config( cls, config: Mapping[str, Any], ) -> _FromNepsConfigResult: + """Convert a NEPS-compatible config to a SamplingResolutionContext. 
+ :param config: A mapping of NEPS-compatible configuration keys to their values. + :return: A _FromNepsConfigResult containing predefined samplings, + environment values, and extra kwargs. + :raises ValueError: If the config is not a valid NEPS-compatible config. + """ predefined_samplings = {} environment_values = {} extra_kwargs = {} @@ -1519,7 +2257,7 @@ def from_neps_config( def _prepare_sampled_configs( chosen_pipelines: list[tuple[Pipeline, SamplingResolutionContext]], n_prev_trials: int, - return_single: bool, + return_single: bool, # noqa: FBT001 ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: configs = [] for i, (_resolved_pipeline, resolution_context) in enumerate(chosen_pipelines): @@ -1540,11 +2278,23 @@ def _prepare_sampled_configs( return configs -def adjust_evaluation_pipeline_for_new_space( +def adjust_evaluation_pipeline_for_neps_space( evaluation_pipeline: Callable, pipeline_space: P, operation_converter: Callable[[Operation], Any] = convert_operation_to_callable, ) -> Callable | str: + """Adjust the evaluation pipeline to work with a NePS space. + This function wraps the evaluation pipeline to sample from the NePS space + and convert the sampled pipeline to a format compatible with the evaluation pipeline. + :param evaluation_pipeline: The evaluation pipeline to adjust. + :param pipeline_space: The NePS pipeline space to sample from. + :param operation_converter: A callable to convert Operation objects to a format + compatible with the evaluation pipeline. + :return: A wrapped evaluation pipeline that samples from the NePS space. + :raises ValueError: If the evaluation_pipeline is not callable or if the + pipeline_space is not a Pipeline object. 
+ """ + @functools.wraps(evaluation_pipeline) def inner(*args: Any, **kwargs: Any) -> Any: # `kwargs` can contain other things not related to diff --git a/neps/space/neps_spaces/optimizers/__init__.py b/neps/space/neps_spaces/optimizers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neps/space/new_space/priorband.py b/neps/space/neps_spaces/optimizers/priorband.py similarity index 74% rename from neps/space/new_space/priorband.py rename to neps/space/neps_spaces/optimizers/priorband.py index 747988265..32d8b03f0 100644 --- a/neps/space/new_space/priorband.py +++ b/neps/space/neps_spaces/optimizers/priorband.py @@ -1,3 +1,10 @@ +"""PriorBand Sampler for NePS Optimizers. +This sampler implements the PriorBand algorithm, which is a sampling strategy +that combines prior knowledge with random sampling to efficiently explore the search +space. It uses a combination of prior sampling, incumbent mutation, and random sampling +based on the fidelity bounds and SH bracket. +""" + from __future__ import annotations import random @@ -7,7 +14,7 @@ import numpy as np from neps.optimizers.utils import brackets -import neps.space.new_space.space as new_space +from neps.space.neps_spaces import neps_space if TYPE_CHECKING: import pandas as pd @@ -15,10 +22,10 @@ @dataclass class PriorBandSampler: - """Implement a sampler based on PriorBand""" + """Implement a sampler based on PriorBand.""" """The pipeline space to optimize over.""" - space: new_space.Pipeline + space: neps_space.Pipeline """The eta value to use for the SH bracket.""" eta: int @@ -30,6 +37,16 @@ class PriorBandSampler: fid_bounds: tuple[int, int] | tuple[float, float] def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: + """Sample a configuration based on the PriorBand algorithm. + + Args: + table (pd.DataFrame): The table containing the configurations and their + performance. + rung (int): The current rung of the optimization. 
+ + Returns: + dict[str, Any]: A sampled configuration. + """ rung_to_fid, rung_sizes = brackets.calculate_sh_rungs( bounds=self.fid_bounds, eta=self.eta, @@ -53,11 +70,15 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: # For SH bracket cost, we include the fact we can continue runs, # i.e. resources for rung 2 discounts the cost of evaluating to rung 1, # only counting the difference in fidelity cost between rung 2 and rung 1. - cost_per_rung = {i: rung_to_fid[i] - rung_to_fid.get(i - 1, 0) for i in rung_to_fid} + cost_per_rung = { + i: rung_to_fid[i] - rung_to_fid.get(i - 1, 0) for i in rung_to_fid + } cost_of_one_sh_bracket = sum(rung_sizes[r] * cost_per_rung[r] for r in rung_sizes) current_cost_used = sum(r * cost_per_rung[r] for r in completed_rungs) - spent_one_sh_bracket_worth_of_fidelity = current_cost_used >= cost_of_one_sh_bracket + spent_one_sh_bracket_worth_of_fidelity = ( + current_cost_used >= cost_of_one_sh_bracket + ) # Check that there is at least rung with `eta` evaluations rung_counts = completed.groupby("rung").size() @@ -88,27 +109,15 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: ] K = len(rung_table) // self.eta - top_k_configs = rung_table.nsmallest(K, columns=["perf"])["config"].tolist() + rung_table.nsmallest(K, columns=["perf"])["config"].tolist() # 2. Get the global incumbent inc_config = completed.loc[completed["perf"].idxmin()]["config"] # 3. 
Calculate a ratio score of how likely each of the top K configs are under - # the prior and inc distribution, weighing them by their position in the top K - # weights = torch.arange(K, 0, -1) - - # top_k_pdf_inc = inc_dist.pdf_configs(top_k_configs, frm=self.encoder) # type: ignore - # top_k_pdf_prior = prior_dist.pdf_configs(top_k_configs, frm=self.encoder) # type: ignore - - # unnormalized_inc_score = (weights * top_k_pdf_inc).sum() - # unnormalized_prior_score = (weights * top_k_pdf_prior).sum() - # total_score = unnormalized_inc_score + unnormalized_prior_score - - # inc_ratio = float(unnormalized_inc_score / total_score) - # prior_ratio = float(unnormalized_prior_score / total_score) - # TODO: [lum]: Here I am simply using fixed values. - # Will maybe have to come up with a way to approximate the pdf for the top configs. + # Will maybe have to come up with a way to approximate the pdf for the top + # configs. inc_ratio = 0.9 prior_ratio = 0.1 @@ -134,8 +143,8 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: def _sample_prior(self) -> dict[str, Any]: # TODO: [lum] have a CenterSampler as fallback, not Random - _try_always_priors_sampler = new_space.PriorOrFallbackSampler( - fallback_sampler=new_space.RandomSampler(predefined_samplings={}), + _try_always_priors_sampler = neps_space.PriorOrFallbackSampler( + fallback_sampler=neps_space.RandomSampler(predefined_samplings={}), prior_use_probability=1, ) @@ -144,13 +153,13 @@ def _sample_prior(self) -> dict[str, Any]: for fidelity_name, fidelity_obj in _fidelity_attrs.items(): _environment_values[fidelity_name] = fidelity_obj.max_value - _resolved_pipeline, resolution_context = new_space.resolve( + _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, domain_sampler=_try_always_priors_sampler, environment_values=_environment_values, ) - config = new_space.NepsCompatConverter.to_neps_config(resolution_context) + config = 
neps_space.NepsCompatConverter.to_neps_config(resolution_context) return dict(**config) def _sample_random(self) -> dict[str, Any]: @@ -159,26 +168,26 @@ def _sample_random(self) -> dict[str, Any]: for fidelity_name, fidelity_obj in _fidelity_attrs.items(): _environment_values[fidelity_name] = fidelity_obj.max_value - _resolved_pipeline, resolution_context = new_space.resolve( + _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, - domain_sampler=new_space.RandomSampler(predefined_samplings={}), + domain_sampler=neps_space.RandomSampler(predefined_samplings={}), environment_values=_environment_values, ) - config = new_space.NepsCompatConverter.to_neps_config(resolution_context) + config = neps_space.NepsCompatConverter.to_neps_config(resolution_context) return dict(**config) def _mutate_inc(self, inc_config: dict[str, Any]) -> dict[str, Any]: - data = new_space.NepsCompatConverter.from_neps_config(config=inc_config) + data = neps_space.NepsCompatConverter.from_neps_config(config=inc_config) - _resolved_pipeline, resolution_context = new_space.resolve( + _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, - domain_sampler=new_space.MutatateUsingCentersSampler( + domain_sampler=neps_space.MutatateUsingCentersSampler( predefined_samplings=data.predefined_samplings, n_mutations=max(1, random.randint(1, int(len(inc_config) / 2))), ), environment_values=data.environment_values, ) - config = new_space.NepsCompatConverter.to_neps_config(resolution_context) + config = neps_space.NepsCompatConverter.to_neps_config(resolution_context) return dict(**config) diff --git a/neps/space/parsing.py b/neps/space/parsing.py index 64fabbec1..fad079c92 100644 --- a/neps/space/parsing.py +++ b/neps/space/parsing.py @@ -9,8 +9,8 @@ from collections.abc import Mapping, Sequence from typing import TYPE_CHECKING, Any, TypeAlias +from neps.space.neps_spaces.neps_space import Pipeline from neps.space.parameters import Categorical, 
Constant, Float, Integer, Parameter -from neps.space.new_space.space import Pipeline from neps.space.search_space import SearchSpace if TYPE_CHECKING: diff --git a/tests/test_neps_space/test_domain__centering.py b/tests/test_neps_space/test_domain__centering.py index 5ea6676a3..37a236c4e 100644 --- a/tests/test_neps_space/test_domain__centering.py +++ b/tests/test_neps_space/test_domain__centering.py @@ -2,15 +2,15 @@ import pytest -from neps.space.new_space import space +from neps.space.neps_spaces import neps_space @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max"), [ - (space.ConfidenceLevel.LOW, (50, 10, 90)), - (space.ConfidenceLevel.MEDIUM, (50, 25, 75)), - (space.ConfidenceLevel.HIGH, (50, 40, 60)), + (neps_space.ConfidenceLevel.LOW, (50, 10, 90)), + (neps_space.ConfidenceLevel.MEDIUM, (50, 25, 75)), + (neps_space.ConfidenceLevel.HIGH, (50, 40, 60)), ], ) def test_centering_integer( @@ -23,11 +23,11 @@ def test_centering_integer( int_prior = 50 - int1 = space.Integer( + int1 = neps_space.Integer( min_value=1, max_value=100, ) - int2 = space.Integer( + int2 = neps_space.Integer( min_value=1, max_value=100, prior=int_prior, @@ -59,9 +59,9 @@ def test_centering_integer( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max"), [ - (space.ConfidenceLevel.LOW, (50.0, 10.399999999999999, 89.6)), - (space.ConfidenceLevel.MEDIUM, (50.0, 25.25, 74.75)), - (space.ConfidenceLevel.HIGH, (50.0, 40.1, 59.9)), + (neps_space.ConfidenceLevel.LOW, (50.0, 10.399999999999999, 89.6)), + (neps_space.ConfidenceLevel.MEDIUM, (50.0, 25.25, 74.75)), + (neps_space.ConfidenceLevel.HIGH, (50.0, 40.1, 59.9)), ], ) def test_centering_float( @@ -74,11 +74,11 @@ def test_centering_float( float_prior = 50.0 - float1 = space.Float( + float1 = neps_space.Float( min_value=1.0, max_value=100.0, ) - float2 = space.Float( + float2 = neps_space.Float( min_value=1.0, max_value=100.0, prior=float_prior, @@ -110,9 +110,9 @@ def test_centering_float( 
@pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max_value"), [ - (space.ConfidenceLevel.LOW, (40, 0, 80, 50)), - (space.ConfidenceLevel.MEDIUM, (25, 0, 50, 50)), - (space.ConfidenceLevel.HIGH, (10, 0, 20, 50)), + (neps_space.ConfidenceLevel.LOW, (40, 0, 80, 50)), + (neps_space.ConfidenceLevel.MEDIUM, (25, 0, 50, 50)), + (neps_space.ConfidenceLevel.HIGH, (10, 0, 20, 50)), ], ) def test_centering_categorical( @@ -125,10 +125,10 @@ def test_centering_categorical( categorical_prior_index_original = 49 - categorical1 = space.Categorical( + categorical1 = neps_space.Categorical( choices=tuple(range(1, 101)), ) - categorical2 = space.Categorical( + categorical2 = neps_space.Categorical( choices=tuple(range(1, 101)), prior_index=categorical_prior_index_original, prior_confidence=confidence_level, @@ -167,22 +167,22 @@ def test_centering_categorical( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max"), [ - (space.ConfidenceLevel.LOW, (10, 5, 13)), - (space.ConfidenceLevel.MEDIUM, (10, 7, 13)), - (space.ConfidenceLevel.HIGH, (10, 8, 12)), + (neps_space.ConfidenceLevel.LOW, (10, 5, 13)), + (neps_space.ConfidenceLevel.MEDIUM, (10, 7, 13)), + (neps_space.ConfidenceLevel.HIGH, (10, 8, 12)), ], ) def test_centering_stranger_ranges_integer( confidence_level, expected_prior_min_max, ): - int1 = space.Integer( + int1 = neps_space.Integer( min_value=1, max_value=13, ) int1_centered = int1.centered_around(10, confidence_level) - int2 = space.Integer( + int2 = neps_space.Integer( min_value=1, max_value=13, prior=10, @@ -208,22 +208,22 @@ def test_centering_stranger_ranges_integer( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max"), [ - (space.ConfidenceLevel.LOW, (0.5, 0.09999999999999998, 0.9)), - (space.ConfidenceLevel.MEDIUM, (0.5, 0.25, 0.75)), - (space.ConfidenceLevel.HIGH, (0.5, 0.4, 0.6)), + (neps_space.ConfidenceLevel.LOW, (0.5, 0.09999999999999998, 0.9)), + (neps_space.ConfidenceLevel.MEDIUM, (0.5, 0.25, 0.75)), + 
(neps_space.ConfidenceLevel.HIGH, (0.5, 0.4, 0.6)), ], ) def test_centering_stranger_ranges_float( confidence_level, expected_prior_min_max, ): - float1 = space.Float( + float1 = neps_space.Float( min_value=0.0, max_value=1.0, ) float1_centered = float1.centered_around(0.5, confidence_level) - float2 = space.Float( + float2 = neps_space.Float( min_value=0.0, max_value=1.0, prior=0.5, @@ -249,21 +249,21 @@ def test_centering_stranger_ranges_float( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max_value"), [ - (space.ConfidenceLevel.LOW, (2, 0, 5, 2)), - (space.ConfidenceLevel.MEDIUM, (2, 0, 4, 2)), - (space.ConfidenceLevel.HIGH, (1, 0, 2, 2)), + (neps_space.ConfidenceLevel.LOW, (2, 0, 5, 2)), + (neps_space.ConfidenceLevel.MEDIUM, (2, 0, 4, 2)), + (neps_space.ConfidenceLevel.HIGH, (1, 0, 2, 2)), ], ) def test_centering_stranger_ranges_categorical( confidence_level, expected_prior_min_max_value, ): - categorical1 = space.Categorical( + categorical1 = neps_space.Categorical( choices=tuple(range(7)), ) categorical1_centered = categorical1.centered_around(2, confidence_level) - categorical2 = space.Categorical( + categorical2 = neps_space.Categorical( choices=tuple(range(7)), prior_index=2, prior_confidence=confidence_level, diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index 0907ce1da..70bd04282 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -5,7 +5,7 @@ import pytest import neps -from neps.space.new_space import space +from neps.space.neps_spaces import neps_space def hyperparameter_pipeline_to_optimize( @@ -27,130 +27,130 @@ def hyperparameter_pipeline_to_optimize( return objective_to_minimize -class DemoHyperparameterSpace(space.Pipeline): - float1 = space.Float( +class DemoHyperparameterSpace(neps_space.Pipeline): + float1 = neps_space.Float( min_value=0, max_value=1, prior=0.1, - 
prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - float2 = space.Float( + float2 = neps_space.Float( min_value=-10, max_value=10, prior=0.1, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - categorical = space.Categorical( + categorical = neps_space.Categorical( choices=(0, 1), prior_index=0, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - integer1 = space.Integer( + integer1 = neps_space.Integer( min_value=0, max_value=1, prior=0, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - integer2 = space.Integer( + integer2 = neps_space.Integer( min_value=1, max_value=1000, prior=10, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) -class DemoHyperparameterWithFidelitySpace(space.Pipeline): - float1 = space.Float( +class DemoHyperparameterWithFidelitySpace(neps_space.Pipeline): + float1 = neps_space.Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - float2 = space.Float( + float2 = neps_space.Float( min_value=-10, max_value=10, prior=0.1, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - categorical = space.Categorical( + categorical = neps_space.Categorical( choices=(0, 1), prior_index=0, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - integer1 = space.Integer( + integer1 = neps_space.Integer( min_value=0, max_value=1, prior=0, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - integer2 = space.Fidelity( - space.Integer( + integer2 = neps_space.Fidelity( + neps_space.Integer( min_value=1, max_value=1000, ), ) -class 
DemoHyperparameterComplexSpace(space.Pipeline): - _small_float = space.Float( +class DemoHyperparameterComplexSpace(neps_space.Pipeline): + _small_float = neps_space.Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - _big_float = space.Float( + _big_float = neps_space.Float( min_value=10, max_value=100, prior=20, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - float1 = space.Categorical( + float1 = neps_space.Categorical( choices=( - space.Resampled(_small_float), - space.Resampled(_big_float), + neps_space.Resampled(_small_float), + neps_space.Resampled(_big_float), ), prior_index=0, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - float2 = space.Categorical( + float2 = neps_space.Categorical( choices=( - space.Resampled(_small_float), - space.Resampled(_big_float), + neps_space.Resampled(_small_float), + neps_space.Resampled(_big_float), float1, ), prior_index=0, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - categorical = space.Categorical( + categorical = neps_space.Categorical( choices=(0, 1), prior_index=0, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - integer1 = space.Integer( + integer1 = neps_space.Integer( min_value=0, max_value=1, prior=0, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - integer2 = space.Integer( + integer2 = neps_space.Integer( min_value=1, max_value=1000, prior=10, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) @pytest.mark.parametrize( "optimizer", - [space.RandomSearch, space.ComplexRandomSearch], + [neps_space.RandomSearch, neps_space.ComplexRandomSearch], ) def 
test_hyperparameter_demo(optimizer): pipeline_space = DemoHyperparameterSpace() root_directory = f"results/hyperparameter_demo__{optimizer.__name__}" neps.run( - evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( + evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space( hyperparameter_pipeline_to_optimize, pipeline_space, ), @@ -166,14 +166,14 @@ def test_hyperparameter_demo(optimizer): @pytest.mark.parametrize( "optimizer", - [space.RandomSearch, space.ComplexRandomSearch], + [neps_space.RandomSearch, neps_space.ComplexRandomSearch], ) def test_hyperparameter_with_fidelity_demo(optimizer): pipeline_space = DemoHyperparameterWithFidelitySpace() root_directory = f"results/hyperparameter_with_fidelity_demo__{optimizer.__name__}" neps.run( - evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( + evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space( hyperparameter_pipeline_to_optimize, pipeline_space, ), @@ -189,14 +189,14 @@ def test_hyperparameter_with_fidelity_demo(optimizer): @pytest.mark.parametrize( "optimizer", - [space.RandomSearch, space.ComplexRandomSearch], + [neps_space.RandomSearch, neps_space.ComplexRandomSearch], ) def test_hyperparameter_complex_demo(optimizer): pipeline_space = DemoHyperparameterComplexSpace() root_directory = f"results/hyperparameter_complex_demo__{optimizer.__name__}" neps.run( - evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( + evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space( hyperparameter_pipeline_to_optimize, pipeline_space, ), @@ -264,18 +264,18 @@ def operation_pipeline_to_optimize(model: Model, some_hp: str): return objective_to_minimize -class DemoOperationSpace(space.Pipeline): +class DemoOperationSpace(neps_space.Pipeline): """A demonstration of how to use operations in a search space. This space defines a model that can be optimized using different inner functions and a factor. 
The model can be used to evaluate a set of values and return an objective to minimize. """ # The way to sample `factor` values - _factor = space.Float( + _factor = neps_space.Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) # Sum @@ -283,45 +283,45 @@ class DemoOperationSpace(space.Pipeline): # `Sum()` # Could have also been defined using the python `sum` function as # `_sum = space.Operation(operator=lambda: sum)` - _sum = space.Operation(operator=Sum) + _sum = neps_space.Operation(operator=Sum) # MultipliedSum # Will be equivalent to something like # `MultipliedSum(factor=0.2)` - _multiplied_sum = space.Operation( + _multiplied_sum = neps_space.Operation( operator=MultipliedSum, - kwargs={"factor": space.Resampled(_factor)}, + kwargs={"factor": neps_space.Resampled(_factor)}, ) # Model # Will be equivalent to something like one of # `Model(Sum(), factor=0.1)` # `Model(MultipliedSum(factor=0.2), factor=0.1)` - _inner_function = space.Categorical( + _inner_function = neps_space.Categorical( choices=(_sum, _multiplied_sum), ) - model = space.Operation( + model = neps_space.Operation( operator=Model, args=(_inner_function,), - kwargs={"factor": space.Resampled(_factor)}, + kwargs={"factor": neps_space.Resampled(_factor)}, ) # An additional hyperparameter - some_hp = space.Categorical( + some_hp = neps_space.Categorical( choices=("hp1", "hp2"), ) @pytest.mark.parametrize( "optimizer", - [space.RandomSearch, space.ComplexRandomSearch], + [neps_space.RandomSearch, neps_space.ComplexRandomSearch], ) def test_operation_demo(optimizer): pipeline_space = DemoOperationSpace() root_directory = f"results/operation_demo__{optimizer.__name__}" neps.run( - evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( + evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space( operation_pipeline_to_optimize, pipeline_space, ), diff --git 
a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index 95e91e75c..e3452146e 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -7,8 +7,8 @@ import neps import neps.optimizers.algorithms as old_algorithms -import neps.space.new_space.bracket_optimizer as new_bracket_optimizer -from neps.space.new_space import space +import neps.space.neps_spaces.bracket_optimizer as new_bracket_optimizer +from neps.space.neps_spaces import neps_space _COSTS = {} @@ -56,28 +56,28 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): } -class DemoHyperparameterWithFidelitySpace(space.Pipeline): - float1 = space.Float( +class DemoHyperparameterWithFidelitySpace(neps_space.Pipeline): + float1 = neps_space.Float( min_value=1, max_value=1000, log=False, prior=600, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - float2 = space.Float( + float2 = neps_space.Float( min_value=-100, max_value=100, prior=0, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - integer1 = space.Integer( + integer1 = neps_space.Integer( min_value=0, max_value=500, prior=35, - prior_confidence=space.ConfidenceLevel.LOW, + prior_confidence=neps_space.ConfidenceLevel.LOW, ) - fidelity = space.Fidelity( - domain=space.Integer( + fidelity = neps_space.Fidelity( + domain=neps_space.Integer( min_value=1, max_value=100, ), @@ -88,11 +88,11 @@ class DemoHyperparameterWithFidelitySpace(space.Pipeline): ("optimizer", "optimizer_name"), [ ( - space.RandomSearch, + neps_space.RandomSearch, "new__RandomSearch", ), ( - space.ComplexRandomSearch, + neps_space.ComplexRandomSearch, "new__ComplexRandomSearch", ), ( @@ -122,7 +122,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): _COSTS.clear() 
neps.run( - evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( + evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space( evaluate_pipeline, pipeline_space, ), diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index 1da454974..dea908e0d 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -7,8 +7,8 @@ import neps import neps.optimizers.algorithms as old_algorithms -import neps.space.new_space.bracket_optimizer as new_bracket_optimizer -from neps.space.new_space import space +import neps.space.neps_spaces.bracket_optimizer as new_bracket_optimizer +from neps.space.neps_spaces import neps_space def evaluate_pipeline(float1, float2, integer1, fidelity): @@ -43,28 +43,28 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): } -class DemoHyperparameterWithFidelitySpace(space.Pipeline): - float1 = space.Float( +class DemoHyperparameterWithFidelitySpace(neps_space.Pipeline): + float1 = neps_space.Float( min_value=1, max_value=1000, log=False, prior=600, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - float2 = space.Float( + float2 = neps_space.Float( min_value=-100, max_value=100, prior=0, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - integer1 = space.Integer( + integer1 = neps_space.Integer( min_value=0, max_value=500, prior=35, - prior_confidence=space.ConfidenceLevel.LOW, + prior_confidence=neps_space.ConfidenceLevel.LOW, ) - fidelity = space.Fidelity( - domain=space.Integer( + fidelity = neps_space.Fidelity( + domain=neps_space.Integer( min_value=1, max_value=100, ), @@ -75,11 +75,11 @@ class DemoHyperparameterWithFidelitySpace(space.Pipeline): ("optimizer", "optimizer_name"), [ ( - space.RandomSearch, + 
neps_space.RandomSearch, "new__RandomSearch", ), ( - space.ComplexRandomSearch, + neps_space.ComplexRandomSearch, "new__ComplexRandomSearch", ), ( @@ -106,7 +106,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): root_directory = f"results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" neps.run( - evaluate_pipeline=space.adjust_evaluation_pipeline_for_new_space( + evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space( evaluate_pipeline, pipeline_space, ), diff --git a/tests/test_neps_space/test_search_space__fidelity.py b/tests/test_neps_space/test_search_space__fidelity.py index 0ed29a9c3..8b8d5a151 100644 --- a/tests/test_neps_space/test_search_space__fidelity.py +++ b/tests/test_neps_space/test_search_space__fidelity.py @@ -4,19 +4,19 @@ import pytest -from neps.space.new_space import space +from neps.space.neps_spaces import neps_space -class DemoHyperparametersWithFidelitySpace(space.Pipeline): +class DemoHyperparametersWithFidelitySpace(neps_space.Pipeline): constant1: int = 42 - float1: float = space.Float( + float1: float = neps_space.Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ) - fidelity_integer1: int = space.Fidelity( - domain=space.Integer( + fidelity_integer1: int = neps_space.Fidelity( + domain=neps_space.Integer( min_value=1, max_value=1000, ), @@ -29,12 +29,12 @@ def test_fidelity_creation_raises_when_domain_has_prior(): ValueError, match=re.escape("The domain of a Fidelity can not have priors: "), ): - space.Fidelity( - domain=space.Integer( + neps_space.Fidelity( + domain=neps_space.Integer( min_value=1, max_value=1000, prior=10, - prior_confidence=space.ConfidenceLevel.MEDIUM, + prior_confidence=neps_space.ConfidenceLevel.MEDIUM, ), ) @@ -49,7 +49,7 @@ def test_fidelity_resolution_raises_when_resolved_with_no_environment_value(): "No value is available in the environment for fidelity 
'fidelity_integer1'.", ), ): - space.resolve(pipeline=pipeline) + neps_space.resolve(pipeline=pipeline) def test_fidelity_resolution_raises_when_resolved_with_invalid_value(): @@ -63,7 +63,7 @@ def test_fidelity_resolution_raises_when_resolved_with_invalid_value(): "Value for fidelity with name 'fidelity_integer1' is outside its allowed range [1, 1000]. Received: -10." ), ): - space.resolve( + neps_space.resolve( pipeline=pipeline, environment_values={"fidelity_integer1": -10}, ) @@ -74,7 +74,7 @@ def test_fidelity_resolution_works(): # Resolve a pipeline which contains a fidelity, # with a valid value for it in the environment. - resolved_pipeline, resolution_context = space.resolve( + resolved_pipeline, resolution_context = neps_space.resolve( pipeline=pipeline, environment_values={"fidelity_integer1": 10}, ) @@ -96,9 +96,9 @@ def test_fidelity_resolution_with_context_works(): # Resolve a pipeline which contains a fidelity, # with a valid value for it in the environment. - resolved_pipeline, resolution_context = space.resolve( + resolved_pipeline, resolution_context = neps_space.resolve( pipeline=pipeline, - domain_sampler=space.OnlyPredefinedValuesSampler( + domain_sampler=neps_space.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), environment_values=environment_values, diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index 78fb4ee2f..686343fdb 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -2,130 +2,138 @@ import pytest -from neps.space.new_space import config_string, space +from neps.space.neps_spaces import config_string, neps_space -class GrammarLike(space.Pipeline): - _id = space.Operation(operator="Identity") - _three = space.Operation(operator="Conv2D-3") - _one = space.Operation(operator="Conv2D-1") - _reluconvbn = space.Operation(operator="ReLUConvBN") +class 
GrammarLike(neps_space.Pipeline): + _id = neps_space.Operation(operator="Identity") + _three = neps_space.Operation(operator="Conv2D-3") + _one = neps_space.Operation(operator="Conv2D-1") + _reluconvbn = neps_space.Operation(operator="ReLUConvBN") - _O = space.Categorical(choices=(_three, _one, _id)) + _O = neps_space.Categorical(choices=(_three, _one, _id)) - _C0 = space.Operation( + _C0 = neps_space.Operation( operator="Sequential", - args=(space.Resampled(_O),), + args=(neps_space.Resampled(_O),), ) - _C1 = space.Operation( + _C1 = neps_space.Operation( operator="Sequential", - args=(space.Resampled(_O), space.Resampled("S"), _reluconvbn), + args=(neps_space.Resampled(_O), neps_space.Resampled("S"), _reluconvbn), ) - _C2 = space.Operation( + _C2 = neps_space.Operation( operator="Sequential", - args=(space.Resampled(_O), space.Resampled("S")), + args=(neps_space.Resampled(_O), neps_space.Resampled("S")), ) - _C3 = space.Operation( + _C3 = neps_space.Operation( operator="Sequential", - args=(space.Resampled("S"),), + args=(neps_space.Resampled("S"),), ) - _C = space.Categorical( + _C = neps_space.Categorical( choices=( - space.Resampled(_C0), - space.Resampled(_C1), - space.Resampled(_C2), - space.Resampled(_C3), + neps_space.Resampled(_C0), + neps_space.Resampled(_C1), + neps_space.Resampled(_C2), + neps_space.Resampled(_C3), ), ) - _S0 = space.Operation( + _S0 = neps_space.Operation( operator="Sequential", - args=(space.Resampled(_C),), + args=(neps_space.Resampled(_C),), ) - _S1 = space.Operation( + _S1 = neps_space.Operation( operator="Sequential", args=(_reluconvbn,), ) - _S2 = space.Operation( + _S2 = neps_space.Operation( operator="Sequential", - args=(space.Resampled("S"),), + args=(neps_space.Resampled("S"),), ) - _S3 = space.Operation( + _S3 = neps_space.Operation( operator="Sequential", - args=(space.Resampled("S"), space.Resampled(_C)), + args=(neps_space.Resampled("S"), neps_space.Resampled(_C)), ) - _S4 = space.Operation( + _S4 = 
neps_space.Operation( operator="Sequential", - args=(space.Resampled(_O), space.Resampled(_O), space.Resampled(_O)), + args=( + neps_space.Resampled(_O), + neps_space.Resampled(_O), + neps_space.Resampled(_O), + ), ) - _S5 = space.Operation( + _S5 = neps_space.Operation( operator="Sequential", args=( - space.Resampled("S"), - space.Resampled("S"), - space.Resampled(_O), - space.Resampled(_O), - space.Resampled(_O), - space.Resampled(_O), - space.Resampled(_O), - space.Resampled(_O), + neps_space.Resampled("S"), + neps_space.Resampled("S"), + neps_space.Resampled(_O), + neps_space.Resampled(_O), + neps_space.Resampled(_O), + neps_space.Resampled(_O), + neps_space.Resampled(_O), + neps_space.Resampled(_O), ), ) - S = space.Categorical( + S = neps_space.Categorical( choices=( - space.Resampled(_S0), - space.Resampled(_S1), - space.Resampled(_S2), - space.Resampled(_S3), - space.Resampled(_S4), - space.Resampled(_S5), + neps_space.Resampled(_S0), + neps_space.Resampled(_S1), + neps_space.Resampled(_S2), + neps_space.Resampled(_S3), + neps_space.Resampled(_S4), + neps_space.Resampled(_S5), ), ) -class GrammarLikeAlt(space.Pipeline): - _id = space.Operation(operator="Identity") - _three = space.Operation(operator="Conv2D-3") - _one = space.Operation(operator="Conv2D-1") - _reluconvbn = space.Operation(operator="ReLUConvBN") +class GrammarLikeAlt(neps_space.Pipeline): + _id = neps_space.Operation(operator="Identity") + _three = neps_space.Operation(operator="Conv2D-3") + _one = neps_space.Operation(operator="Conv2D-1") + _reluconvbn = neps_space.Operation(operator="ReLUConvBN") - _O = space.Categorical(choices=(_three, _one, _id)) + _O = neps_space.Categorical(choices=(_three, _one, _id)) - _C_ARGS = space.Categorical( + _C_ARGS = neps_space.Categorical( choices=( - (space.Resampled(_O),), - (space.Resampled(_O), space.Resampled("S"), _reluconvbn), - (space.Resampled(_O), space.Resampled("S")), - (space.Resampled("S"),), + (neps_space.Resampled(_O),), + 
(neps_space.Resampled(_O), neps_space.Resampled("S"), _reluconvbn), + (neps_space.Resampled(_O), neps_space.Resampled("S")), + (neps_space.Resampled("S"),), ), ) - _C = space.Operation( + _C = neps_space.Operation( operator="Sequential", - args=space.Resampled(_C_ARGS), + args=neps_space.Resampled(_C_ARGS), ) - _S_ARGS = space.Categorical( + _S_ARGS = neps_space.Categorical( choices=( - (space.Resampled(_C),), + (neps_space.Resampled(_C),), (_reluconvbn,), - (space.Resampled("S"),), - (space.Resampled("S"), space.Resampled(_C)), - (space.Resampled(_O), space.Resampled(_O), space.Resampled(_O)), + (neps_space.Resampled("S"),), + (neps_space.Resampled("S"), neps_space.Resampled(_C)), + ( + neps_space.Resampled(_O), + neps_space.Resampled(_O), + neps_space.Resampled(_O), + ), ( - space.Resampled("S"), - space.Resampled("S"), - space.Resampled(_O), - space.Resampled(_O), - space.Resampled(_O), - space.Resampled(_O), - space.Resampled(_O), - space.Resampled(_O), + neps_space.Resampled("S"), + neps_space.Resampled("S"), + neps_space.Resampled(_O), + neps_space.Resampled(_O), + neps_space.Resampled(_O), + neps_space.Resampled(_O), + neps_space.Resampled(_O), + neps_space.Resampled(_O), ), ), ) - S = space.Operation( + S = neps_space.Operation( operator="Sequential", - args=space.Resampled(_S_ARGS), + args=neps_space.Resampled(_S_ARGS), ) @@ -134,13 +142,13 @@ def test_resolve(): pipeline = GrammarLike() try: - resolved_pipeline, resolution_context = space.resolve(pipeline) + resolved_pipeline, resolution_context = neps_space.resolve(pipeline) except RecursionError: pytest.xfail("XFAIL due to too much recursion.") raise s = resolved_pipeline.S - s_config_string = space.convert_operation_to_string(s) + s_config_string = neps_space.convert_operation_to_string(s) assert s_config_string pretty_config = config_string.ConfigString(s_config_string).pretty_format() assert pretty_config @@ -151,13 +159,13 @@ def test_resolve_alt(): pipeline = GrammarLikeAlt() try: - 
resolved_pipeline, resolution_context = space.resolve(pipeline) + resolved_pipeline, resolution_context = neps_space.resolve(pipeline) except RecursionError: pytest.xfail("XFAIL due to too much recursion.") raise s = resolved_pipeline.S - s_config_string = space.convert_operation_to_string(s) + s_config_string = neps_space.convert_operation_to_string(s) assert s_config_string pretty_config = config_string.ConfigString(s_config_string).pretty_format() assert pretty_config @@ -268,9 +276,9 @@ def test_resolve_context(): pipeline = GrammarLike() - resolved_pipeline, resolution_context = space.resolve( + resolved_pipeline, resolution_context = neps_space.resolve( pipeline, - domain_sampler=space.OnlyPredefinedValuesSampler( + domain_sampler=neps_space.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), ) @@ -286,7 +294,7 @@ def test_resolve_context(): assert sampled_values == samplings_to_make s = resolved_pipeline.S - s_config_string = space.convert_operation_to_string(s) + s_config_string = neps_space.convert_operation_to_string(s) assert s_config_string assert s_config_string == expected_s_config_string @@ -355,9 +363,9 @@ def test_resolve_context_alt(): pipeline = GrammarLikeAlt() - resolved_pipeline, resolution_context = space.resolve( + resolved_pipeline, resolution_context = neps_space.resolve( pipeline, - domain_sampler=space.OnlyPredefinedValuesSampler( + domain_sampler=neps_space.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), ) @@ -373,6 +381,6 @@ def test_resolve_context_alt(): assert sampled_values == samplings_to_make s = resolved_pipeline.S - s_config_string = space.convert_operation_to_string(s) + s_config_string = neps_space.convert_operation_to_string(s) assert s_config_string assert s_config_string == expected_s_config_string diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index 42dd5518d..5d9d61f4c 100644 --- 
a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -2,10 +2,10 @@ import pytest -from neps.space.new_space import config_string, space +from neps.space.neps_spaces import config_string, neps_space -class HNASLikePipeline(space.Pipeline): +class HNASLikePipeline(neps_space.Pipeline): """Based on the `hierarchical+shared` variant (cell block is shared everywhere). Across _CONVBLOCK items, _ACT and _CONV also shared. Only the _NORM changes. @@ -17,186 +17,188 @@ class HNASLikePipeline(space.Pipeline): # Adding `PReLU` with a float hyperparameter `init` # Note that the sampled `_prelu_init_value` will be shared across all `_PRELU` uses, # since no `Resampled` was requested for it - _prelu_init_value = space.Float(min_value=0.1, max_value=0.9) - _PRELU = space.Operation( + _prelu_init_value = neps_space.Float(min_value=0.1, max_value=0.9) + _PRELU = neps_space.Operation( operator="ACT prelu", kwargs={"init": _prelu_init_value}, ) # ------------------------------------------------------ # Added `_PRELU` to the possible `_ACT` choices - _ACT = space.Categorical( + _ACT = neps_space.Categorical( choices=( - space.Operation(operator="ACT relu"), - space.Operation(operator="ACT hardswish"), - space.Operation(operator="ACT mish"), + neps_space.Operation(operator="ACT relu"), + neps_space.Operation(operator="ACT hardswish"), + neps_space.Operation(operator="ACT mish"), _PRELU, ), ) - _CONV = space.Categorical( + _CONV = neps_space.Categorical( choices=( - space.Operation(operator="CONV conv1x1"), - space.Operation(operator="CONV conv3x3"), - space.Operation(operator="CONV dconv3x3"), + neps_space.Operation(operator="CONV conv1x1"), + neps_space.Operation(operator="CONV conv3x3"), + neps_space.Operation(operator="CONV dconv3x3"), ), ) - _NORM = space.Categorical( + _NORM = neps_space.Categorical( choices=( - space.Operation(operator="NORM batch"), - space.Operation(operator="NORM instance"), - 
space.Operation(operator="NORM layer"), + neps_space.Operation(operator="NORM batch"), + neps_space.Operation(operator="NORM instance"), + neps_space.Operation(operator="NORM layer"), ), ) - _CONVBLOCK = space.Operation( + _CONVBLOCK = neps_space.Operation( operator="CONVBLOCK Sequential3", args=( _ACT, _CONV, - space.Resampled(_NORM), + neps_space.Resampled(_NORM), ), ) - _CONVBLOCK_FULL = space.Operation( + _CONVBLOCK_FULL = neps_space.Operation( operator="OPS Sequential1", - args=(space.Resampled(_CONVBLOCK),), + args=(neps_space.Resampled(_CONVBLOCK),), ) - _OP = space.Categorical( + _OP = neps_space.Categorical( choices=( - space.Operation(operator="OPS zero"), - space.Operation(operator="OPS id"), - space.Operation(operator="OPS avg_pool"), - space.Resampled(_CONVBLOCK_FULL), + neps_space.Operation(operator="OPS zero"), + neps_space.Operation(operator="OPS id"), + neps_space.Operation(operator="OPS avg_pool"), + neps_space.Resampled(_CONVBLOCK_FULL), ), ) - CL = space.Operation( + CL = neps_space.Operation( operator="CELL Cell", args=( - space.Resampled(_OP), - space.Resampled(_OP), - space.Resampled(_OP), - space.Resampled(_OP), - space.Resampled(_OP), - space.Resampled(_OP), + neps_space.Resampled(_OP), + neps_space.Resampled(_OP), + neps_space.Resampled(_OP), + neps_space.Resampled(_OP), + neps_space.Resampled(_OP), + neps_space.Resampled(_OP), ), ) - _C = space.Categorical( + _C = neps_space.Categorical( choices=( - space.Operation(operator="C Sequential2", args=(CL, CL)), - space.Operation(operator="C Sequential3", args=(CL, CL, CL)), - space.Operation(operator="C Residual2", args=(CL, CL, CL)), + neps_space.Operation(operator="C Sequential2", args=(CL, CL)), + neps_space.Operation(operator="C Sequential3", args=(CL, CL, CL)), + neps_space.Operation(operator="C Residual2", args=(CL, CL, CL)), ), ) - _RESBLOCK = space.Operation(operator="resBlock") - _DOWN = space.Categorical( + _RESBLOCK = neps_space.Operation(operator="resBlock") + _DOWN = 
neps_space.Categorical( choices=( - space.Operation(operator="DOWN Sequential2", args=(CL, _RESBLOCK)), - space.Operation(operator="DOWN Sequential3", args=(CL, CL, _RESBLOCK)), - space.Operation(operator="DOWN Residual2", args=(CL, _RESBLOCK, _RESBLOCK)), + neps_space.Operation(operator="DOWN Sequential2", args=(CL, _RESBLOCK)), + neps_space.Operation(operator="DOWN Sequential3", args=(CL, CL, _RESBLOCK)), + neps_space.Operation( + operator="DOWN Residual2", args=(CL, _RESBLOCK, _RESBLOCK) + ), ), ) - _D0 = space.Categorical( + _D0 = neps_space.Categorical( choices=( - space.Operation( + neps_space.Operation( operator="D0 Sequential3", args=( - space.Resampled(_C), - space.Resampled(_C), + neps_space.Resampled(_C), + neps_space.Resampled(_C), CL, ), ), - space.Operation( + neps_space.Operation( operator="D0 Sequential4", args=( - space.Resampled(_C), - space.Resampled(_C), - space.Resampled(_C), + neps_space.Resampled(_C), + neps_space.Resampled(_C), + neps_space.Resampled(_C), CL, ), ), - space.Operation( + neps_space.Operation( operator="D0 Residual3", args=( - space.Resampled(_C), - space.Resampled(_C), + neps_space.Resampled(_C), + neps_space.Resampled(_C), CL, CL, ), ), ), ) - _D1 = space.Categorical( + _D1 = neps_space.Categorical( choices=( - space.Operation( + neps_space.Operation( operator="D1 Sequential3", args=( - space.Resampled(_C), - space.Resampled(_C), - space.Resampled(_DOWN), + neps_space.Resampled(_C), + neps_space.Resampled(_C), + neps_space.Resampled(_DOWN), ), ), - space.Operation( + neps_space.Operation( operator="D1 Sequential4", args=( - space.Resampled(_C), - space.Resampled(_C), - space.Resampled(_C), - space.Resampled(_DOWN), + neps_space.Resampled(_C), + neps_space.Resampled(_C), + neps_space.Resampled(_C), + neps_space.Resampled(_DOWN), ), ), - space.Operation( + neps_space.Operation( operator="D1 Residual3", args=( - space.Resampled(_C), - space.Resampled(_C), - space.Resampled(_DOWN), - space.Resampled(_DOWN), + 
neps_space.Resampled(_C), + neps_space.Resampled(_C), + neps_space.Resampled(_DOWN), + neps_space.Resampled(_DOWN), ), ), ), ) - _D2 = space.Categorical( + _D2 = neps_space.Categorical( choices=( - space.Operation( + neps_space.Operation( operator="D2 Sequential3", args=( - space.Resampled(_D1), - space.Resampled(_D1), - space.Resampled(_D0), + neps_space.Resampled(_D1), + neps_space.Resampled(_D1), + neps_space.Resampled(_D0), ), ), - space.Operation( + neps_space.Operation( operator="D2 Sequential3", args=( - space.Resampled(_D0), - space.Resampled(_D1), - space.Resampled(_D1), + neps_space.Resampled(_D0), + neps_space.Resampled(_D1), + neps_space.Resampled(_D1), ), ), - space.Operation( + neps_space.Operation( operator="D2 Sequential4", args=( - space.Resampled(_D1), - space.Resampled(_D1), - space.Resampled(_D0), - space.Resampled(_D0), + neps_space.Resampled(_D1), + neps_space.Resampled(_D1), + neps_space.Resampled(_D0), + neps_space.Resampled(_D0), ), ), ), ) - ARCH: space.Operation = _D2 + ARCH: neps_space.Operation = _D2 @pytest.mark.repeat(500) def test_hnas_like(): pipeline = HNASLikePipeline() - resolved_pipeline, resolution_context = space.resolve(pipeline) + resolved_pipeline, resolution_context = neps_space.resolve(pipeline) assert resolved_pipeline is not None assert resolution_context.samplings_made is not None assert tuple(resolved_pipeline.get_attrs().keys()) == ("CL", "ARCH") @@ -206,16 +208,16 @@ def test_hnas_like(): def test_hnas_like_string(): pipeline = HNASLikePipeline() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) arch = resolved_pipeline.ARCH - arch_config_string = space.convert_operation_to_string(arch) + arch_config_string = neps_space.convert_operation_to_string(arch) assert arch_config_string pretty_config = config_string.ConfigString(arch_config_string).pretty_format() assert pretty_config cl = resolved_pipeline.CL - cl_config_string = 
space.convert_operation_to_string(cl) + cl_config_string = neps_space.convert_operation_to_string(cl) assert cl_config_string pretty_config = config_string.ConfigString(cl_config_string).pretty_format() assert pretty_config @@ -340,9 +342,9 @@ def test_hnas_like_context(): pipeline = HNASLikePipeline() - resolved_pipeline, resolution_context = space.resolve( + resolved_pipeline, resolution_context = neps_space.resolve( pipeline=pipeline, - domain_sampler=space.OnlyPredefinedValuesSampler( + domain_sampler=neps_space.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), ) @@ -358,14 +360,14 @@ def test_hnas_like_context(): assert sampled_values == samplings_to_make cl = resolved_pipeline.CL - cl_config_string = space.convert_operation_to_string(cl) + cl_config_string = neps_space.convert_operation_to_string(cl) assert cl_config_string assert cl_config_string == expected_cl_config_string assert "NORM batch" in cl_config_string assert "NORM layer" in cl_config_string arch = resolved_pipeline.ARCH - arch_config_string = space.convert_operation_to_string(arch) + arch_config_string = neps_space.convert_operation_to_string(arch) assert arch_config_string assert arch_config_string == expected_arch_config_string assert cl_config_string in arch_config_string diff --git a/tests/test_neps_space/test_search_space__nos_like.py b/tests/test_neps_space/test_search_space__nos_like.py index 5e48663a4..a650837ce 100644 --- a/tests/test_neps_space/test_search_space__nos_like.py +++ b/tests/test_neps_space/test_search_space__nos_like.py @@ -2,107 +2,107 @@ import pytest -from neps.space.new_space import config_string, space +from neps.space.neps_spaces import config_string, neps_space -class NosBench(space.Pipeline): - _UNARY_FUN = space.Categorical( +class NosBench(neps_space.Pipeline): + _UNARY_FUN = neps_space.Categorical( choices=( - space.Operation(operator="Square"), - space.Operation(operator="Exp"), - space.Operation(operator="Log"), + 
neps_space.Operation(operator="Square"), + neps_space.Operation(operator="Exp"), + neps_space.Operation(operator="Log"), ) ) - _BINARY_FUN = space.Categorical( + _BINARY_FUN = neps_space.Categorical( choices=( - space.Operation(operator="Add"), - space.Operation(operator="Sub"), - space.Operation(operator="Mul"), + neps_space.Operation(operator="Add"), + neps_space.Operation(operator="Sub"), + neps_space.Operation(operator="Mul"), ) ) - _TERNARY_FUN = space.Categorical( + _TERNARY_FUN = neps_space.Categorical( choices=( - space.Operation(operator="Interpolate"), - space.Operation(operator="Bias_Correct"), + neps_space.Operation(operator="Interpolate"), + neps_space.Operation(operator="Bias_Correct"), ) ) - _PARAMS = space.Categorical( + _PARAMS = neps_space.Categorical( choices=( - space.Operation(operator="Params"), - space.Operation(operator="Gradient"), - space.Operation(operator="Opt_Step"), + neps_space.Operation(operator="Params"), + neps_space.Operation(operator="Gradient"), + neps_space.Operation(operator="Opt_Step"), ) ) - _CONST = space.Integer(3, 8) - _VAR = space.Integer(9, 19) + _CONST = neps_space.Integer(3, 8) + _VAR = neps_space.Integer(9, 19) - _POINTER = space.Categorical( + _POINTER = neps_space.Categorical( choices=( - space.Resampled(_PARAMS), - space.Resampled(_CONST), - space.Resampled(_VAR), + neps_space.Resampled(_PARAMS), + neps_space.Resampled(_CONST), + neps_space.Resampled(_VAR), ), ) - _UNARY = space.Operation( + _UNARY = neps_space.Operation( operator="Unary", args=( - space.Resampled(_UNARY_FUN), - space.Resampled(_POINTER), + neps_space.Resampled(_UNARY_FUN), + neps_space.Resampled(_POINTER), ), ) - _BINARY = space.Operation( + _BINARY = neps_space.Operation( operator="Binary", args=( - space.Resampled(_BINARY_FUN), - space.Resampled(_POINTER), - space.Resampled(_POINTER), + neps_space.Resampled(_BINARY_FUN), + neps_space.Resampled(_POINTER), + neps_space.Resampled(_POINTER), ), ) - _TERNARY = space.Operation( + _TERNARY = 
neps_space.Operation( operator="Ternary", args=( - space.Resampled(_TERNARY_FUN), - space.Resampled(_POINTER), - space.Resampled(_POINTER), - space.Resampled(_POINTER), + neps_space.Resampled(_TERNARY_FUN), + neps_space.Resampled(_POINTER), + neps_space.Resampled(_POINTER), + neps_space.Resampled(_POINTER), ), ) - _F_ARGS = space.Categorical( + _F_ARGS = neps_space.Categorical( choices=( - space.Resampled(_UNARY), - space.Resampled(_BINARY), - space.Resampled(_TERNARY), + neps_space.Resampled(_UNARY), + neps_space.Resampled(_BINARY), + neps_space.Resampled(_TERNARY), ), ) - _F = space.Operation( + _F = neps_space.Operation( operator="Function", - args=(space.Resampled(_F_ARGS),), - kwargs={"var": space.Resampled(_VAR)}, + args=(neps_space.Resampled(_F_ARGS),), + kwargs={"var": neps_space.Resampled(_VAR)}, ) - _L_ARGS = space.Categorical( + _L_ARGS = neps_space.Categorical( choices=( - (space.Resampled(_F),), - (space.Resampled(_F), space.Resampled("_L")), + (neps_space.Resampled(_F),), + (neps_space.Resampled(_F), neps_space.Resampled("_L")), ), ) - _L = space.Operation( + _L = neps_space.Operation( operator="Line_operator", - args=space.Resampled(_L_ARGS), + args=neps_space.Resampled(_L_ARGS), ) - P = space.Operation( + P = neps_space.Operation( operator="Program", - args=(space.Resampled(_L),), + args=(neps_space.Resampled(_L),), ) @@ -111,13 +111,13 @@ def test_resolve(): pipeline = NosBench() try: - resolved_pipeline, resolution_context = space.resolve(pipeline) + resolved_pipeline, resolution_context = neps_space.resolve(pipeline) except RecursionError: pytest.xfail("XFAIL due to too much recursion.") raise p = resolved_pipeline.P - p_config_string = space.convert_operation_to_string(p) + p_config_string = neps_space.convert_operation_to_string(p) assert p_config_string pretty_config = config_string.ConfigString(p_config_string).pretty_format() assert pretty_config diff --git a/tests/test_neps_space/test_search_space__recursion.py 
b/tests/test_neps_space/test_search_space__recursion.py index 720d07f53..07f696df6 100644 --- a/tests/test_neps_space/test_search_space__recursion.py +++ b/tests/test_neps_space/test_search_space__recursion.py @@ -2,7 +2,7 @@ from collections.abc import Callable, Sequence -from neps.space.new_space import space +from neps.space.neps_spaces import neps_space class Model: @@ -30,12 +30,12 @@ def __call__(self, values: Sequence[float]) -> float: return sum(values) -class DemoRecursiveOperationSpace(space.Pipeline): +class DemoRecursiveOperationSpace(neps_space.Pipeline): # The way to sample `factor` values - _factor = space.Float(min_value=0, max_value=1) + _factor = neps_space.Float(min_value=0, max_value=1) # Sum - _sum = space.Operation(operator=Sum) + _sum = neps_space.Operation(operator=Sum) # Model # Can recursively request itself as an arg. @@ -46,12 +46,12 @@ class DemoRecursiveOperationSpace(space.Pipeline): # ... # If we want the `factor` values to be different, # we just request a resample for them - _inner_function = space.Categorical( - choices=(_sum, space.Resampled("model")), + _inner_function = neps_space.Categorical( + choices=(_sum, neps_space.Resampled("model")), ) - model = space.Operation( + model = neps_space.Operation( operator=Model, - args=(space.Resampled(_inner_function),), + args=(neps_space.Resampled(_inner_function),), kwargs={"factor": _factor}, ) @@ -65,7 +65,7 @@ def test_recursion(): seen_inner_model_counts = [] for _ in range(200): - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) model = resolved_pipeline.model assert model.operator is Model diff --git a/tests/test_neps_space/test_search_space__resampled.py b/tests/test_neps_space/test_search_space__resampled.py index b0e33190c..92d8da1fb 100644 --- a/tests/test_neps_space/test_search_space__resampled.py +++ b/tests/test_neps_space/test_search_space__resampled.py @@ -2,79 +2,79 @@ import pytest 
-from neps.space.new_space import space +from neps.space.neps_spaces import neps_space -class ActPipelineSimpleFloat(space.Pipeline): - prelu_init_value = space.Float( +class ActPipelineSimpleFloat(neps_space.Pipeline): + prelu_init_value = neps_space.Float( min_value=0, max_value=1000000, log=False, prior=0.25, - prior_confidence=space.ConfidenceLevel.LOW, + prior_confidence=neps_space.ConfidenceLevel.LOW, ) - prelu_shared1 = space.Operation( + prelu_shared1 = neps_space.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_shared2 = space.Operation( + prelu_shared2 = neps_space.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_own_clone1 = space.Operation( + prelu_own_clone1 = neps_space.Operation( operator="prelu", - kwargs={"init": space.Resampled(prelu_init_value)}, + kwargs={"init": neps_space.Resampled(prelu_init_value)}, ) - prelu_own_clone2 = space.Operation( + prelu_own_clone2 = neps_space.Operation( operator="prelu", - kwargs={"init": space.Resampled(prelu_init_value)}, + kwargs={"init": neps_space.Resampled(prelu_init_value)}, ) - _prelu_init_resampled = space.Resampled(prelu_init_value) - prelu_common_clone1 = space.Operation( + _prelu_init_resampled = neps_space.Resampled(prelu_init_value) + prelu_common_clone1 = neps_space.Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) - prelu_common_clone2 = space.Operation( + prelu_common_clone2 = neps_space.Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) -class ActPipelineComplexInteger(space.Pipeline): - prelu_init_value = space.Integer(min_value=0, max_value=1000000) +class ActPipelineComplexInteger(neps_space.Pipeline): + prelu_init_value = neps_space.Integer(min_value=0, max_value=1000000) - prelu_shared1 = space.Operation( + prelu_shared1 = neps_space.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_shared2 = space.Operation( + prelu_shared2 = neps_space.Operation( operator="prelu", 
kwargs={"init": prelu_init_value}, ) - prelu_own_clone1 = space.Operation( + prelu_own_clone1 = neps_space.Operation( operator="prelu", - kwargs={"init": space.Resampled(prelu_init_value)}, + kwargs={"init": neps_space.Resampled(prelu_init_value)}, ) - prelu_own_clone2 = space.Operation( + prelu_own_clone2 = neps_space.Operation( operator="prelu", - kwargs={"init": space.Resampled(prelu_init_value)}, + kwargs={"init": neps_space.Resampled(prelu_init_value)}, ) - _prelu_init_resampled = space.Resampled(prelu_init_value) - prelu_common_clone1 = space.Operation( + _prelu_init_resampled = neps_space.Resampled(prelu_init_value) + prelu_common_clone1 = neps_space.Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) - prelu_common_clone2 = space.Operation( + prelu_common_clone2 = neps_space.Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) - act: space.Operation = space.Operation( + act: neps_space.Operation = neps_space.Operation( operator="sequential6", args=( prelu_shared1, @@ -88,42 +88,42 @@ class ActPipelineComplexInteger(space.Pipeline): "prelu_shared": prelu_shared1, "prelu_own_clone": prelu_own_clone1, "prelu_common_clone": prelu_common_clone1, - "resampled_hp_value": space.Resampled(prelu_init_value), + "resampled_hp_value": neps_space.Resampled(prelu_init_value), }, ) -class CellPipelineCategorical(space.Pipeline): - conv_block = space.Categorical( +class CellPipelineCategorical(neps_space.Pipeline): + conv_block = neps_space.Categorical( choices=( - space.Operation(operator="conv1"), - space.Operation(operator="conv2"), + neps_space.Operation(operator="conv1"), + neps_space.Operation(operator="conv2"), ), ) - op1 = space.Categorical( + op1 = neps_space.Categorical( choices=( conv_block, - space.Operation("op1"), + neps_space.Operation("op1"), ), ) - op2 = space.Categorical( + op2 = neps_space.Categorical( choices=( - space.Resampled(conv_block), - space.Operation("op2"), + neps_space.Resampled(conv_block), + 
neps_space.Operation("op2"), ), ) - _resampled_op1 = space.Resampled(op1) - cell = space.Operation( + _resampled_op1 = neps_space.Resampled(op1) + cell = neps_space.Operation( operator="cell", args=( op1, op2, _resampled_op1, - space.Resampled(op2), + neps_space.Resampled(op2), _resampled_op1, - space.Resampled(op2), + neps_space.Resampled(op2), ), ) @@ -132,7 +132,7 @@ class CellPipelineCategorical(space.Pipeline): def test_resampled_float(): pipeline = ActPipelineSimpleFloat() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) assert resolved_pipeline is not None assert tuple(resolved_pipeline.get_attrs().keys()) == ( @@ -173,7 +173,7 @@ def test_resampled_float(): def test_resampled_integer(): pipeline = ActPipelineComplexInteger() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) assert resolved_pipeline is not None assert tuple(resolved_pipeline.get_attrs().keys()) == ( @@ -243,7 +243,7 @@ def test_resampled_integer(): def test_resampled_categorical(): pipeline = CellPipelineCategorical() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) assert resolved_pipeline is not None assert tuple(resolved_pipeline.get_attrs().keys()) == ( @@ -261,8 +261,8 @@ def test_resampled_categorical(): assert op1 is not pipeline.op1 assert op2 is not pipeline.op2 - assert isinstance(op1, space.Operation) - assert isinstance(op2, space.Operation) + assert isinstance(op1, neps_space.Operation) + assert isinstance(op2, neps_space.Operation) assert (op1 is conv_block) or (op1.operator == "op1") assert op2.operator in ("conv1", "conv2", "op2") diff --git a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index 50fe5ec24..13b62a5e1 100644 --- 
a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -2,35 +2,35 @@ import pytest -from neps.space.new_space import space +from neps.space.neps_spaces import neps_space -class ActPipelineSimple(space.Pipeline): - prelu = space.Operation( +class ActPipelineSimple(neps_space.Pipeline): + prelu = neps_space.Operation( operator="prelu", kwargs={"init": 0.1}, ) - relu = space.Operation(operator="relu") + relu = neps_space.Operation(operator="relu") - act: space.Operation = space.Categorical( + act: neps_space.Operation = neps_space.Categorical( choices=(prelu, relu), ) -class ActPipelineComplex(space.Pipeline): - prelu_init_value: float = space.Float(min_value=0.1, max_value=0.9) - prelu = space.Operation( +class ActPipelineComplex(neps_space.Pipeline): + prelu_init_value: float = neps_space.Float(min_value=0.1, max_value=0.9) + prelu = neps_space.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - act: space.Operation = space.Categorical( + act: neps_space.Operation = neps_space.Categorical( choices=(prelu,), ) -class FixedPipeline(space.Pipeline): +class FixedPipeline(neps_space.Pipeline): prelu_init_value: float = 0.5 - prelu = space.Operation( + prelu = neps_space.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) @@ -40,41 +40,41 @@ class FixedPipeline(space.Pipeline): _conv_choices_low = ("conv1x1", "conv3x3") _conv_choices_high = ("conv5x5", "conv9x9") _conv_choices_prior_confidence_choices = ( - space.ConfidenceLevel.LOW, - space.ConfidenceLevel.MEDIUM, - space.ConfidenceLevel.HIGH, + neps_space.ConfidenceLevel.LOW, + neps_space.ConfidenceLevel.MEDIUM, + neps_space.ConfidenceLevel.HIGH, ) -class ConvPipeline(space.Pipeline): - conv_choices_prior_index: int = space.Integer( +class ConvPipeline(neps_space.Pipeline): + conv_choices_prior_index: int = neps_space.Integer( min_value=0, max_value=1, log=False, prior=0, - 
prior_confidence=space.ConfidenceLevel.LOW, + prior_confidence=neps_space.ConfidenceLevel.LOW, ) - conv_choices_prior_confidence: space.ConfidenceLevel = space.Categorical( + conv_choices_prior_confidence: neps_space.ConfidenceLevel = neps_space.Categorical( choices=_conv_choices_prior_confidence_choices, prior_index=1, - prior_confidence=space.ConfidenceLevel.LOW, + prior_confidence=neps_space.ConfidenceLevel.LOW, ) - conv_choices: tuple[str, ...] = space.Categorical( + conv_choices: tuple[str, ...] = neps_space.Categorical( choices=(_conv_choices_low, _conv_choices_high), prior_index=conv_choices_prior_index, prior_confidence=conv_choices_prior_confidence, ) - _conv1: str = space.Categorical( + _conv1: str = neps_space.Categorical( choices=conv_choices, ) - _conv2: str = space.Categorical( + _conv2: str = neps_space.Categorical( choices=conv_choices, ) - conv_block: space.Operation = space.Categorical( + conv_block: neps_space.Operation = neps_space.Categorical( choices=( - space.Operation( + neps_space.Operation( operator="sequential3", args=[_conv1, _conv2, _conv1], ), @@ -82,32 +82,32 @@ class ConvPipeline(space.Pipeline): ) -class CellPipeline(space.Pipeline): - _act = space.Operation(operator="relu") - _conv = space.Operation(operator="conv3x3") - _norm = space.Operation(operator="batch") +class CellPipeline(neps_space.Pipeline): + _act = neps_space.Operation(operator="relu") + _conv = neps_space.Operation(operator="conv3x3") + _norm = neps_space.Operation(operator="batch") - conv_block = space.Operation(operator="sequential3", args=(_act, _conv, _norm)) + conv_block = neps_space.Operation(operator="sequential3", args=(_act, _conv, _norm)) - op1 = space.Categorical( + op1 = neps_space.Categorical( choices=( conv_block, - space.Operation(operator="zero"), - space.Operation(operator="avg_pool"), + neps_space.Operation(operator="zero"), + neps_space.Operation(operator="avg_pool"), ), ) - op2 = space.Categorical( + op2 = neps_space.Categorical( choices=( 
conv_block, - space.Operation(operator="zero"), - space.Operation(operator="avg_pool"), + neps_space.Operation(operator="zero"), + neps_space.Operation(operator="avg_pool"), ), ) _some_int = 2 - _some_float = space.Float(min_value=0.5, max_value=0.5) + _some_float = neps_space.Float(min_value=0.5, max_value=0.5) - cell = space.Operation( + cell = neps_space.Operation( operator="cell", args=(op1, op2, op1, op2, op1, op2), kwargs={"float_hp": _some_float, "int_hp": _some_int}, @@ -118,7 +118,7 @@ class CellPipeline(space.Pipeline): def test_nested_simple(): pipeline = ActPipelineSimple() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) assert resolved_pipeline is not None assert tuple(resolved_pipeline.get_attrs().keys()) == ("prelu", "relu", "act") @@ -136,10 +136,10 @@ def test_nested_simple_string(): pipeline = ActPipelineSimple() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) act = resolved_pipeline.act - act_config_string = space.convert_operation_to_string(act) + act_config_string = neps_space.convert_operation_to_string(act) assert act_config_string assert act_config_string in possible_cell_config_strings @@ -148,7 +148,7 @@ def test_nested_simple_string(): def test_nested_complex(): pipeline = ActPipelineComplex() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) assert resolved_pipeline is not None assert tuple(resolved_pipeline.get_attrs().keys()) == ( @@ -175,10 +175,10 @@ def test_nested_complex(): def test_nested_complex_string(): pipeline = ActPipelineComplex() - resolved_pipeline, sampled_values = space.resolve(pipeline) + resolved_pipeline, sampled_values = neps_space.resolve(pipeline) act = resolved_pipeline.act - act_config_string = space.convert_operation_to_string(act) + 
act_config_string = neps_space.convert_operation_to_string(act) assert act_config_string # expected to look like: "(prelu {'init': 0.1087727907176638})" @@ -196,7 +196,7 @@ def test_nested_complex_string(): def test_fixed_pipeline(): pipeline = FixedPipeline() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) assert resolved_pipeline is not None assert tuple(resolved_pipeline.get_attrs().keys()) == tuple( @@ -212,10 +212,10 @@ def test_fixed_pipeline(): def test_fixed_pipeline_string(): pipeline = FixedPipeline() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) act = resolved_pipeline.act - act_config_string = space.convert_operation_to_string(act) + act_config_string = neps_space.convert_operation_to_string(act) assert act_config_string assert act_config_string == "(prelu {'init': 0.5})" @@ -224,7 +224,7 @@ def test_fixed_pipeline_string(): def test_simple_reuse(): pipeline = ConvPipeline() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) assert resolved_pipeline is not None assert tuple(resolved_pipeline.get_attrs().keys()) == ( @@ -265,10 +265,10 @@ def test_simple_reuse_string(): pipeline = ConvPipeline() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) conv_block = resolved_pipeline.conv_block - conv_block_config_string = space.convert_operation_to_string(conv_block) + conv_block_config_string = neps_space.convert_operation_to_string(conv_block) assert conv_block_config_string assert conv_block_config_string in possible_conv_block_config_strings @@ -277,7 +277,7 @@ def test_simple_reuse_string(): def test_shared_complex(): pipeline = CellPipeline() - resolved_pipeline, _resolution_context = 
space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) assert resolved_pipeline is not pipeline assert resolved_pipeline is not None @@ -295,8 +295,8 @@ def test_shared_complex(): op2 = resolved_pipeline.op2 assert op1 is not pipeline.op1 assert op2 is not pipeline.op2 - assert isinstance(op1, space.Operation) - assert isinstance(op2, space.Operation) + assert isinstance(op1, neps_space.Operation) + assert isinstance(op2, neps_space.Operation) if op1 is op2: assert op1 is conv_block @@ -336,10 +336,10 @@ def test_shared_complex_string(): pipeline = CellPipeline() - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) cell = resolved_pipeline.cell - cell_config_string = space.convert_operation_to_string(cell) + cell_config_string = neps_space.convert_operation_to_string(cell) assert cell_config_string assert cell_config_string in possible_cell_config_strings @@ -359,9 +359,9 @@ def test_shared_complex_context(): pipeline = CellPipeline() - resolved_pipeline_first, _resolution_context_first = space.resolve( + resolved_pipeline_first, _resolution_context_first = neps_space.resolve( pipeline=pipeline, - domain_sampler=space.OnlyPredefinedValuesSampler( + domain_sampler=neps_space.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), ) @@ -373,9 +373,9 @@ def test_shared_complex_context(): assert sampled_values_first == samplings_to_make assert list(sampled_values_first.items()) == list(samplings_to_make.items()) - resolved_pipeline_second, _resolution_context_second = space.resolve( + resolved_pipeline_second, _resolution_context_second = neps_space.resolve( pipeline=pipeline, - domain_sampler=space.OnlyPredefinedValuesSampler( + domain_sampler=neps_space.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), ) @@ -394,10 +394,10 @@ def test_shared_complex_context(): # however, their final results should be the 
same thing assert ( - space.convert_operation_to_string(resolved_pipeline_first.cell) + neps_space.convert_operation_to_string(resolved_pipeline_first.cell) == expected_config_string ) assert ( - space.convert_operation_to_string(resolved_pipeline_second.cell) + neps_space.convert_operation_to_string(resolved_pipeline_second.cell) == expected_config_string ) diff --git a/tests/test_neps_space/utils.py b/tests/test_neps_space/utils.py index 7090b0146..9710bd0f9 100644 --- a/tests/test_neps_space/utils.py +++ b/tests/test_neps_space/utils.py @@ -2,20 +2,20 @@ from collections.abc import Callable -from neps.space.new_space import space +from neps.space.neps_spaces import neps_space def generate_possible_config_strings( - pipeline: space.Pipeline, - resolved_pipeline_attr_getter: Callable[[space.Pipeline], space.Operation], + pipeline: neps_space.Pipeline, + resolved_pipeline_attr_getter: Callable[[neps_space.Pipeline], neps_space.Operation], num_resolutions: int = 50_000, ): result = set() for _ in range(num_resolutions): - resolved_pipeline, _resolution_context = space.resolve(pipeline) + resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) attr = resolved_pipeline_attr_getter(resolved_pipeline) - config_string = space.convert_operation_to_string(attr) + config_string = neps_space.convert_operation_to_string(attr) result.add(config_string) return result From 2f33c40760f987db2a92d1bf2ae7a86e5e2147a3 Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 2 Jul 2025 02:10:02 +0200 Subject: [PATCH 010/156] Refactored Optimizers - Added a new module `bracket_optimizer.py` that implements a bracket-based optimization strategy for multi-fidelity configurations in NePS spaces. - Introduced the `_BracketOptimizer` class to handle the optimization process, including sampling configurations from a prior band and managing different fidelity levels. - Implemented various bracket types such as successive halving, hyperband, ASHA, and async hyperband. 
- Updated `priorband.py` to utilize the new sampling strategies and refactored the prior sampling logic. - Created a new `sampling.py` module to define various samplers, including `OnlyPredefinedValuesSampler`, `RandomSampler`, `PriorOrFallbackSampler`, and mutation-based samplers. - Modified existing tests to accommodate the new optimizer and sampling strategies, ensuring compatibility with the updated structure. - Refactored import paths in test files to align with the new module organization. --- neps/space/neps_spaces/neps_space.py | 664 +----------------- .../neps_spaces/optimizers/algorithms.py | 306 ++++++++ .../{ => optimizers}/bracket_optimizer.py | 13 +- .../space/neps_spaces/optimizers/priorband.py | 17 +- neps/space/neps_spaces/sampling.py | 401 +++++++++++ .../test_neps_space/test_neps_integration.py | 21 +- ...st_neps_integration_priorband__max_cost.py | 7 +- ...t_neps_integration_priorband__max_evals.py | 7 +- .../test_search_space__fidelity.py | 6 +- .../test_search_space__grammar_like.py | 5 +- .../test_search_space__hnas_like.py | 3 +- .../test_search_space__reuse_arch_elements.py | 56 +- 12 files changed, 808 insertions(+), 698 deletions(-) create mode 100644 neps/space/neps_spaces/optimizers/algorithms.py rename neps/space/neps_spaces/{ => optimizers}/bracket_optimizer.py (96%) create mode 100644 neps/space/neps_spaces/sampling.py diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 56975c2bd..7f99a2329 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -11,12 +11,10 @@ import dataclasses import enum import functools -import heapq import math import random from collections.abc import Callable, Generator, Mapping, Sequence from typing import ( - TYPE_CHECKING, Any, Generic, Protocol, @@ -27,11 +25,7 @@ from neps.optimizers import optimizer from neps.space.neps_spaces import config_string - -if TYPE_CHECKING: - import neps.state.optimizer as optimizer_state - 
import neps.state.trial as trial_state - from neps.state.trial import Trial +from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler, RandomSampler T = TypeVar("T") P = TypeVar("P", bound="Pipeline") @@ -880,391 +874,6 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: # noqa: ARG002 # ------------------------------------------------- -class OnlyPredefinedValuesSampler(DomainSampler): - """A sampler that only returns predefined values for a given path. - If the path is not found in the predefined values, it raises a ValueError. - :param predefined_samplings: A mapping of paths to predefined values. - """ - - def __init__( - self, - predefined_samplings: Mapping[str, Any], - ): - """Initialize the sampler with predefined samplings. - :param predefined_samplings: A mapping of paths to predefined values. - :raises ValueError: If predefined_samplings is empty. - """ - self._predefined_samplings = predefined_samplings - - def __call__( - self, - *, - domain_obj: Domain[T], # noqa: ARG002 - current_path: str, - ) -> T: - """Sample a value from the predefined samplings for the given path. - :param domain_obj: The domain object, not used in this sampler. - :param current_path: The path for which to sample a value. - :return: The predefined value for the given path. - :raises ValueError: If the current path is not in the predefined samplings. - """ - if current_path not in self._predefined_samplings: - raise ValueError(f"No predefined value for path: {current_path!r}.") - return cast(T, self._predefined_samplings[current_path]) - - -class RandomSampler(DomainSampler): - """A sampler that randomly samples from a predefined set of values. - If the current path is not in the predefined values, it samples from the domain. - :param predefined_samplings: A mapping of paths to predefined values. - This sampler will use these values if available, otherwise it will sample from the - domain. 
- """ - - def __init__( - self, - predefined_samplings: Mapping[str, Any], - ): - """Initialize the sampler with predefined samplings. - :param predefined_samplings: A mapping of paths to predefined values. - :raises - ValueError: If predefined_samplings is empty. - """ - self._predefined_samplings = predefined_samplings - - def __call__( - self, - *, - domain_obj: Domain[T], - current_path: str, - ) -> T: - """Sample a value from the predefined samplings or the domain. - :param domain_obj: The domain object from which to sample. - :param current_path: The path for which to sample a value. - :return: A sampled value, either from the predefined samplings or from the - domain. - :raises ValueError: If the current path is not in the predefined samplings and - the domain does not have a prior defined. - """ - if current_path not in self._predefined_samplings: - sampled_value = domain_obj.sample() - else: - sampled_value = cast(T, self._predefined_samplings[current_path]) - return sampled_value - - -class PriorOrFallbackSampler(DomainSampler): - """A sampler that uses a prior value if available, otherwise falls back to another - sampler. - :param fallback_sampler: A DomainSampler to use if the prior is not available. - :param prior_use_probability: The probability of using the prior value when - available. - This should be a float between 0 and 1, where 0 means never use the prior and 1 means - always use it. - :raises ValueError: If the prior_use_probability is not between 0 and 1. - """ - - def __init__( - self, - fallback_sampler: DomainSampler, - prior_use_probability: float, - ): - """Initialize the sampler with a fallback sampler and a prior use probability. - :param fallback_sampler: A DomainSampler to use if the prior is not available. - :param prior_use_probability: The probability of using the prior value when - available. - This should be a float between 0 and 1, where 0 means never use the prior and 1 - means always use it. 
- :raises ValueError: If the prior_use_probability is not between 0 and 1. - """ - if not 0 <= prior_use_probability <= 1: - raise ValueError( - "The given `prior_use_probability` value is out of range:" - f" {prior_use_probability!r}." - ) - - self._fallback_sampler = fallback_sampler - self._prior_use_probability = prior_use_probability - - def __call__( - self, - *, - domain_obj: Domain[T], - current_path: str, - ) -> T: - """Sample a value from the domain, using the prior if available and according to - the prior use probability. - :param domain_obj: The domain object from which to sample. - :param current_path: The path for which to sample a value. - :return: A sampled value, either from the prior or from the fallback sampler. - :raises ValueError: If the domain does not have a prior defined and the fallback - sampler is not provided. - """ - use_prior = random.choices( - (True, False), - weights=(self._prior_use_probability, 1 - self._prior_use_probability), - k=1, - )[0] - if domain_obj.has_prior and use_prior: - return domain_obj.prior - return self._fallback_sampler( - domain_obj=domain_obj, - current_path=current_path, - ) - - -class MutateByForgettingSampler(DomainSampler): - """A sampler that mutates predefined samplings by forgetting a certain number of - them. It randomly selects a number of predefined samplings to forget and returns a - new sampler that only uses the remaining samplings. - :param predefined_samplings: A mapping of paths to predefined values. - :param n_forgets: The number of predefined samplings to forget. - This should be an integer greater than 0 and less than or equal to the number of - predefined samplings. - :raises ValueError: If n_forgets is not a valid integer or if it exceeds the number - of predefined samplings. - """ - - def __init__( - self, - predefined_samplings: Mapping[str, Any], - n_forgets: int, - ): - """Initialize the sampler with predefined samplings and a number of forgets. 
- :param predefined_samplings: A mapping of paths to predefined values. - :param n_forgets: The number of predefined samplings to forget. - This should be an integer greater than 0 and less than or equal to the number of - predefined samplings. - :raises ValueError: If n_forgets is not a valid integer or if it exceeds the - number of predefined samplings. - """ - if ( - not isinstance(n_forgets, int) - or n_forgets <= 0 - or n_forgets > len(predefined_samplings) - ): - raise ValueError(f"Invalid value for `n_forgets`: {n_forgets!r}.") - - mutated_samplings_to_make = _mutate_samplings_to_make_by_forgetting( - samplings_to_make=predefined_samplings, - n_forgets=n_forgets, - ) - - self._random_sampler = RandomSampler( - predefined_samplings=mutated_samplings_to_make, - ) - - def __call__( - self, - *, - domain_obj: Domain[T], - current_path: str, - ) -> T: - """Sample a value from the mutated predefined samplings or the domain. - :param domain_obj: The domain object from which to sample. - :param current_path: The path for which to sample a value. - :return: A sampled value, either from the mutated predefined samplings or from - the domain. - :raises ValueError: If the current path is not in the mutated predefined - samplings and the domain does not have a prior defined. - """ - return self._random_sampler(domain_obj=domain_obj, current_path=current_path) - - -class MutatateUsingCentersSampler(DomainSampler): - """A sampler that mutates predefined samplings by forgetting a certain number of them, - but still uses the original values as centers for sampling. - :param predefined_samplings: A mapping of paths to predefined values. - :param n_mutations: The number of predefined samplings to mutate. - This should be an integer greater than 0 and less than or equal to the number of - predefined samplings. - :raises ValueError: If n_mutations is not a valid integer or if it exceeds the number - of predefined samplings. 
- """ - - def __init__( - self, - predefined_samplings: Mapping[str, Any], - n_mutations: int, - ): - """Initialize the sampler with predefined samplings and a number of mutations. - :param predefined_samplings: A mapping of paths to predefined values. - :param n_mutations: The number of predefined samplings to mutate. - This should be an integer greater than 0 and less than or equal to the number of - predefined samplings. - :raises ValueError: If n_mutations is not a valid integer or if it exceeds - the number of predefined samplings. - """ - if ( - not isinstance(n_mutations, int) - or n_mutations <= 0 - or n_mutations > len(predefined_samplings) - ): - raise ValueError(f"Invalid value for `n_mutations`: {n_mutations!r}.") - - self._kept_samplings_to_make = _mutate_samplings_to_make_by_forgetting( - samplings_to_make=predefined_samplings, - n_forgets=n_mutations, - ) - - # Still remember the original choices. We'll use them as centers later. - self._original_samplings_to_make = predefined_samplings - - def __call__( - self, - *, - domain_obj: Domain[T], - current_path: str, - ) -> T: - """Sample a value from the predefined samplings or the domain, using original - values as centers if the current path is not in the kept samplings. - :param domain_obj: The domain object from which to sample. - :param current_path: The path for which to sample a value. - :return: A sampled value, either from the kept samplings or from the domain, - using the original values as centers if necessary. - :raises ValueError: If the current path is not in the kept samplings and the - domain does not have a prior defined. - """ - if current_path not in self._kept_samplings_to_make: - # For this path we either have forgotten the value or we never had it. - if current_path in self._original_samplings_to_make: - # We had a value for this path originally, use it as a center. 
- original_value = self._original_samplings_to_make[current_path] - sampled_value = domain_obj.centered_around( - center=original_value, - confidence=ConfidenceLevel.HIGH, - ).sample() - else: - # We never had a value for this path, we can only sample from the domain. - sampled_value = domain_obj.sample() - else: - # For this path we have chosen to keep the original value. - sampled_value = cast(T, self._kept_samplings_to_make[current_path]) - - return sampled_value - - -class CrossoverNotPossibleError(Exception): - """Exception raised when a crossover operation is not possible.""" - - -class CrossoverByMixingSampler(DomainSampler): - """A sampler that performs a crossover operation by mixing two sets of predefined - samplings. It combines the predefined samplings from two sources, allowing for a - probability-based - selection of values from either source. - :param predefined_samplings_1: The first set of predefined samplings. - :param predefined_samplings_2: The second set of predefined samplings. - :param prefer_first_probability: The probability of preferring values from the first - set over the second set when both have values for the same path. - This should be a float between 0 and 1, where 0 means always prefer the second set - and 1 means always prefer the first set. - :raises ValueError: If prefer_first_probability is not between 0 and 1. - :raises CrossoverNotPossibleError: If no crossovers were made between the two sets - of predefined samplings. - """ - - def __init__( - self, - predefined_samplings_1: Mapping[str, Any], - predefined_samplings_2: Mapping[str, Any], - prefer_first_probability: float, - ): - """Initialize the sampler with two sets of predefined samplings and a preference - probability for the first set. - :param predefined_samplings_1: The first set of predefined samplings. - :param predefined_samplings_2: The second set of predefined samplings. 
- :param prefer_first_probability: The probability of preferring values from the - first set over the second set when both have values for the same path. - This should be a float between 0 and 1, where 0 means always prefer the second - set and 1 means always prefer the first set. - :raises ValueError: If prefer_first_probability is not between 0 and 1. - """ - if not isinstance(prefer_first_probability, float) or not ( - 0 <= prefer_first_probability <= 1 - ): - raise ValueError( - "Invalid value for `prefer_first_probability`:" - f" {prefer_first_probability!r}." - ) - - ( - made_any_crossovers, - crossed_over_samplings_to_make, - ) = _crossover_samplings_to_make_by_mixing( - predefined_samplings_1=predefined_samplings_1, - predefined_samplings_2=predefined_samplings_2, - prefer_first_probability=prefer_first_probability, - ) - - if not made_any_crossovers: - raise CrossoverNotPossibleError("No crossovers were made.") - - self._random_sampler = RandomSampler( - predefined_samplings=crossed_over_samplings_to_make, - ) - - def __call__( - self, - *, - domain_obj: Domain[T], - current_path: str, - ) -> T: - """Sample a value from the crossed-over predefined samplings or the domain. - :param domain_obj: The domain object from which to sample. - :param current_path: The path for which to sample a value. - :return: A sampled value, either from the crossed-over predefined samplings or - from the domain. - :raises ValueError: If the current path is not in the crossed-over predefined - samplings and the domain does not have a prior defined. 
- """ - return self._random_sampler(domain_obj=domain_obj, current_path=current_path) - - -def _mutate_samplings_to_make_by_forgetting( - samplings_to_make: Mapping[str, Any], - n_forgets: int, -) -> Mapping[str, Any]: - mutated_samplings_to_make = dict(**samplings_to_make) - - samplings_to_delete = random.sample( - list(samplings_to_make.keys()), - k=n_forgets, - ) - - for choice_to_delete in samplings_to_delete: - mutated_samplings_to_make.pop(choice_to_delete) - - return mutated_samplings_to_make - - -def _crossover_samplings_to_make_by_mixing( - predefined_samplings_1: Mapping[str, Any], - predefined_samplings_2: Mapping[str, Any], - prefer_first_probability: float, -) -> tuple[bool, Mapping[str, Any]]: - crossed_over_samplings = dict(**predefined_samplings_1) - made_any_crossovers = False - - for path, sampled_value_in_2 in predefined_samplings_2.items(): - if path in crossed_over_samplings: - use_value_from_2 = random.choices( - (False, True), - weights=(prefer_first_probability, 1 - prefer_first_probability), - k=1, - )[0] - if use_value_from_2: - crossed_over_samplings[path] = sampled_value_in_2 - made_any_crossovers = True - else: - crossed_over_samplings[path] = sampled_value_in_2 - - return made_any_crossovers, crossed_over_samplings - - -# ------------------------------------------------- - - class SamplingResolutionContext: """A context for resolving samplings in a NePS space. It manages the resolution root, domain sampler, environment values, @@ -1910,277 +1519,6 @@ def convert_operation_to_string(operation: Operation) -> str: # ------------------------------------------------- -class RandomSearch: - """A simple random search optimizer for a NePS pipeline. - It samples configurations randomly from the pipeline's domain and environment values. - :param pipeline: The pipeline to optimize, which should be a Pipeline object. - :raises ValueError: If the pipeline is not a Pipeline object. 
- """ - - def __init__(self, pipeline: Pipeline): - """Initialize the RandomSearch optimizer with a pipeline. - :param pipeline: The pipeline to optimize, which should be a Pipeline object. - :raises ValueError: If the pipeline is not a Pipeline object. - """ - self._pipeline = pipeline - - self._environment_values = {} - fidelity_attrs = self._pipeline.fidelity_attrs - for fidelity_name, fidelity_obj in fidelity_attrs.items(): - self._environment_values[fidelity_name] = fidelity_obj.max_value - - self._random_sampler = RandomSampler(predefined_samplings={}) - - def __call__( - self, - trials: Mapping[str, trial_state.Trial], - budget_info: optimizer_state.BudgetInfo | None, # noqa: ARG002 - n: int | None = None, - ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: - """Sample configurations randomly from the pipeline's domain and environment - values. - :param trials: A mapping of trial IDs to Trial objects, representing previous - trials. - :param budget_info: The budget information for the optimization process. - :param n: The number of configurations to sample. If None, a single configuration - will be sampled. - :return: A SampledConfig object or a list of SampledConfig objects, depending - on the value of n. - :raises ValueError: If the pipeline is not a Pipeline object or if the trials are - not a valid mapping of trial IDs to Trial objects. - """ - n_prev_trials = len(trials) - n_requested = 1 if n is None else n - return_single = n is None - - chosen_pipelines = [ - resolve( - pipeline=self._pipeline, - domain_sampler=self._random_sampler, - environment_values=self._environment_values, - ) - for _ in range(n_requested) - ] - - return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) - - -class ComplexRandomSearch: - """A complex random search optimizer for a NePS pipeline. 
- It samples configurations randomly from the pipeline's domain and environment values, - and also performs mutations and crossovers based on previous successful trials. - :param pipeline: The pipeline to optimize, which should be a Pipeline object. - :raises ValueError: If the pipeline is not a Pipeline object. - """ - - def __init__(self, pipeline: Pipeline): - """Initialize the ComplexRandomSearch optimizer with a pipeline. - :param pipeline: The pipeline to optimize, which should be a Pipeline object. - :raises ValueError: If the pipeline is not a Pipeline object. - """ - self._pipeline = pipeline - - self._environment_values = {} - fidelity_attrs = self._pipeline.fidelity_attrs - for fidelity_name, fidelity_obj in fidelity_attrs.items(): - self._environment_values[fidelity_name] = fidelity_obj.max_value - - self._random_sampler = RandomSampler( - predefined_samplings={}, - ) - self._try_always_priors_sampler = PriorOrFallbackSampler( - fallback_sampler=self._random_sampler, - prior_use_probability=1, - ) - self._sometimes_priors_sampler = PriorOrFallbackSampler( - fallback_sampler=self._random_sampler, - prior_use_probability=0.1, - ) - - def __call__( - self, - trials: Mapping[str, trial_state.Trial], - budget_info: optimizer_state.BudgetInfo | None, # noqa: ARG002 - n: int | None = None, - ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: - """Sample configurations randomly from the pipeline's domain and environment - values, and also perform mutations and crossovers based on previous successful - trials. - :param trials: A mapping of trial IDs to Trial objects, representing previous - trials. - :param budget_info: The budget information for the optimization process. - :param n: The number of configurations to sample. If None, a single configuration - will be sampled. - :return: A SampledConfig object or a list of SampledConfig objects, depending - on the value of n. 
- :raises ValueError: If the pipeline is not a Pipeline object or if the trials are - not a valid mapping of trial IDs to Trial objects. - """ - n_prev_trials = len(trials) - n_requested = 1 if n is None else n - return_single = n is None - - random_pipelines = [ - resolve( - pipeline=self._pipeline, - domain_sampler=self._random_sampler, - environment_values=self._environment_values, - ) - for _ in range(n_requested * 5) - ] - sometimes_priors_pipelines = [ - resolve( - pipeline=self._pipeline, - domain_sampler=self._sometimes_priors_sampler, - environment_values=self._environment_values, - ) - for _ in range(n_requested * 5) - ] - - mutated_incumbents = [] - crossed_over_incumbents = [] - - successful_trials: list[Trial] = list( - filter( - lambda trial: trial.report.reported_as == trial.State.SUCCESS - if trial.report is not None - else False, - trials.values(), - ) - ) - if len(successful_trials) > 0: - n_top_trials = 5 - top_trials = heapq.nsmallest( - n_top_trials, - successful_trials, - key=lambda trial: float(trial.report.objective_to_minimize) - if trial.report and isinstance(trial.report.objective_to_minimize, float) - else float("inf"), - ) # Will have up to `n_top_trials` items. - - # Do some mutations. - for top_trial in top_trials: - top_trial_config = top_trial.config - - # Mutate by resampling around some values of the original config. 
- mutated_incumbents += [ - resolve( - pipeline=self._pipeline, - domain_sampler=MutatateUsingCentersSampler( - predefined_samplings=top_trial_config, - n_mutations=1, - ), - environment_values=self._environment_values, - ) - for _ in range(n_requested * 5) - ] - mutated_incumbents += [ - resolve( - pipeline=self._pipeline, - domain_sampler=MutatateUsingCentersSampler( - predefined_samplings=top_trial_config, - n_mutations=max( - 1, random.randint(1, int(len(top_trial_config) / 2)) - ), - ), - environment_values=self._environment_values, - ) - for _ in range(n_requested * 5) - ] - - # Mutate by completely forgetting some values of the original config. - mutated_incumbents += [ - resolve( - pipeline=self._pipeline, - domain_sampler=MutateByForgettingSampler( - predefined_samplings=top_trial_config, - n_forgets=1, - ), - environment_values=self._environment_values, - ) - for _ in range(n_requested * 5) - ] - mutated_incumbents += [ - resolve( - pipeline=self._pipeline, - domain_sampler=MutateByForgettingSampler( - predefined_samplings=top_trial_config, - n_forgets=max( - 1, random.randint(1, int(len(top_trial_config) / 2)) - ), - ), - environment_values=self._environment_values, - ) - for _ in range(n_requested * 5) - ] - - # Do some crossovers. - if len(top_trials) > 1: - for _ in range(n_requested * 3): - trial_1, trial_2 = random.sample(top_trials, k=2) - - try: - crossover_sampler = CrossoverByMixingSampler( - predefined_samplings_1=trial_1.config, - predefined_samplings_2=trial_2.config, - prefer_first_probability=0.5, - ) - except CrossoverNotPossibleError: - # A crossover was not possible for them. Do nothing. 
- pass - else: - crossed_over_incumbents.append( - resolve( - pipeline=self._pipeline, - domain_sampler=crossover_sampler, - environment_values=self._environment_values, - ), - ) - - try: - crossover_sampler = CrossoverByMixingSampler( - predefined_samplings_1=trial_2.config, - predefined_samplings_2=trial_1.config, - prefer_first_probability=0.5, - ) - except CrossoverNotPossibleError: - # A crossover was not possible for them. Do nothing. - pass - else: - crossed_over_incumbents.append( - resolve( - pipeline=self._pipeline, - domain_sampler=crossover_sampler, - environment_values=self._environment_values, - ), - ) - - all_sampled_pipelines = [ - *random_pipelines, - *sometimes_priors_pipelines, - *mutated_incumbents, - *crossed_over_incumbents, - ] - - # Here we can have a model which picks from all the sampled pipelines. - # Currently, we just pick randomly from them. - chosen_pipelines = random.sample(all_sampled_pipelines, k=n_requested) - - if n_prev_trials == 0: - # In this case, always include the prior pipeline. - prior_pipeline = resolve( - pipeline=self._pipeline, - domain_sampler=self._try_always_priors_sampler, - environment_values=self._environment_values, - ) - chosen_pipelines[0] = prior_pipeline - - return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) - - -# ------------------------------------------------- - - class NepsCompatConverter: """A class to convert between NePS configurations and NEPS-compatible configurations. It provides methods to convert a SamplingResolutionContext to a NEPS-compatible config diff --git a/neps/space/neps_spaces/optimizers/algorithms.py b/neps/space/neps_spaces/optimizers/algorithms.py new file mode 100644 index 000000000..c0bef3e9e --- /dev/null +++ b/neps/space/neps_spaces/optimizers/algorithms.py @@ -0,0 +1,306 @@ +"""Optimizers for NePS pipelines. +These optimizers implement various strategies for sampling configurations from a NePS +pipeline. 
They include simple random search, complex random search with mutation and +crossover, and more advanced sampling techniques that leverage prior knowledge and +successful trials. +""" + +from __future__ import annotations + +import heapq +import random +from collections.abc import Mapping +from typing import TYPE_CHECKING + +from neps.space.neps_spaces.neps_space import ( + Pipeline, + _prepare_sampled_configs, + resolve, +) +from neps.space.neps_spaces.sampling import ( + CrossoverByMixingSampler, + CrossoverNotPossibleError, + MutatateUsingCentersSampler, + MutateByForgettingSampler, + PriorOrFallbackSampler, + RandomSampler, +) + +if TYPE_CHECKING: + import neps.state.optimizer as optimizer_state + import neps.state.trial as trial_state + from neps.optimizers import optimizer + from neps.state.trial import Trial + + +class RandomSearch: + """A simple random search optimizer for a NePS pipeline. + It samples configurations randomly from the pipeline's domain and environment values. + :param pipeline: The pipeline to optimize, which should be a Pipeline object. + :raises ValueError: If the pipeline is not a Pipeline object. + """ + + def __init__(self, pipeline: Pipeline): + """Initialize the RandomSearch optimizer with a pipeline. + :param pipeline: The pipeline to optimize, which should be a Pipeline object. + :raises ValueError: If the pipeline is not a Pipeline object. 
+ """ + self._pipeline = pipeline + + self._environment_values = {} + fidelity_attrs = self._pipeline.fidelity_attrs + for fidelity_name, fidelity_obj in fidelity_attrs.items(): + self._environment_values[fidelity_name] = fidelity_obj.max_value + + self._random_sampler = RandomSampler(predefined_samplings={}) + + def __call__( + self, + trials: Mapping[str, trial_state.Trial], + budget_info: optimizer_state.BudgetInfo | None, # noqa: ARG002 + n: int | None = None, + ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: + """Sample configurations randomly from the pipeline's domain and environment + values. + :param trials: A mapping of trial IDs to Trial objects, representing previous + trials. + :param budget_info: The budget information for the optimization process. + :param n: The number of configurations to sample. If None, a single configuration + will be sampled. + :return: A SampledConfig object or a list of SampledConfig objects, depending + on the value of n. + :raises ValueError: If the pipeline is not a Pipeline object or if the trials are + not a valid mapping of trial IDs to Trial objects. + """ + n_prev_trials = len(trials) + n_requested = 1 if n is None else n + return_single = n is None + + chosen_pipelines = [ + resolve( + pipeline=self._pipeline, + domain_sampler=self._random_sampler, + environment_values=self._environment_values, + ) + for _ in range(n_requested) + ] + + return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) + + +class ComplexRandomSearch: + """A complex random search optimizer for a NePS pipeline. + It samples configurations randomly from the pipeline's domain and environment values, + and also performs mutations and crossovers based on previous successful trials. + :param pipeline: The pipeline to optimize, which should be a Pipeline object. + :raises ValueError: If the pipeline is not a Pipeline object. 
+ """ + + def __init__(self, pipeline: Pipeline): + """Initialize the ComplexRandomSearch optimizer with a pipeline. + :param pipeline: The pipeline to optimize, which should be a Pipeline object. + :raises ValueError: If the pipeline is not a Pipeline object. + """ + self._pipeline = pipeline + + self._environment_values = {} + fidelity_attrs = self._pipeline.fidelity_attrs + for fidelity_name, fidelity_obj in fidelity_attrs.items(): + self._environment_values[fidelity_name] = fidelity_obj.max_value + + self._random_sampler = RandomSampler( + predefined_samplings={}, + ) + self._try_always_priors_sampler = PriorOrFallbackSampler( + fallback_sampler=self._random_sampler, + prior_use_probability=1, + ) + self._sometimes_priors_sampler = PriorOrFallbackSampler( + fallback_sampler=self._random_sampler, + prior_use_probability=0.1, + ) + + def __call__( + self, + trials: Mapping[str, trial_state.Trial], + budget_info: optimizer_state.BudgetInfo | None, # noqa: ARG002 + n: int | None = None, + ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: + """Sample configurations randomly from the pipeline's domain and environment + values, and also perform mutations and crossovers based on previous successful + trials. + :param trials: A mapping of trial IDs to Trial objects, representing previous + trials. + :param budget_info: The budget information for the optimization process. + :param n: The number of configurations to sample. If None, a single configuration + will be sampled. + :return: A SampledConfig object or a list of SampledConfig objects, depending + on the value of n. + :raises ValueError: If the pipeline is not a Pipeline object or if the trials are + not a valid mapping of trial IDs to Trial objects. 
+ """ + n_prev_trials = len(trials) + n_requested = 1 if n is None else n + return_single = n is None + + random_pipelines = [ + resolve( + pipeline=self._pipeline, + domain_sampler=self._random_sampler, + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + sometimes_priors_pipelines = [ + resolve( + pipeline=self._pipeline, + domain_sampler=self._sometimes_priors_sampler, + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + + mutated_incumbents = [] + crossed_over_incumbents = [] + + successful_trials: list[Trial] = list( + filter( + lambda trial: ( + trial.report.reported_as == trial.State.SUCCESS + if trial.report is not None + else False + ), + trials.values(), + ) + ) + if len(successful_trials) > 0: + n_top_trials = 5 + top_trials = heapq.nsmallest( + n_top_trials, + successful_trials, + key=lambda trial: ( + float(trial.report.objective_to_minimize) + if trial.report + and isinstance(trial.report.objective_to_minimize, float) + else float("inf") + ), + ) # Will have up to `n_top_trials` items. + + # Do some mutations. + for top_trial in top_trials: + top_trial_config = top_trial.config + + # Mutate by resampling around some values of the original config. + mutated_incumbents += [ + resolve( + pipeline=self._pipeline, + domain_sampler=MutatateUsingCentersSampler( + predefined_samplings=top_trial_config, + n_mutations=1, + ), + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + mutated_incumbents += [ + resolve( + pipeline=self._pipeline, + domain_sampler=MutatateUsingCentersSampler( + predefined_samplings=top_trial_config, + n_mutations=max( + 1, random.randint(1, int(len(top_trial_config) / 2)) + ), + ), + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + + # Mutate by completely forgetting some values of the original config. 
+ mutated_incumbents += [ + resolve( + pipeline=self._pipeline, + domain_sampler=MutateByForgettingSampler( + predefined_samplings=top_trial_config, + n_forgets=1, + ), + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + mutated_incumbents += [ + resolve( + pipeline=self._pipeline, + domain_sampler=MutateByForgettingSampler( + predefined_samplings=top_trial_config, + n_forgets=max( + 1, random.randint(1, int(len(top_trial_config) / 2)) + ), + ), + environment_values=self._environment_values, + ) + for _ in range(n_requested * 5) + ] + + # Do some crossovers. + if len(top_trials) > 1: + for _ in range(n_requested * 3): + trial_1, trial_2 = random.sample(top_trials, k=2) + + try: + crossover_sampler = CrossoverByMixingSampler( + predefined_samplings_1=trial_1.config, + predefined_samplings_2=trial_2.config, + prefer_first_probability=0.5, + ) + except CrossoverNotPossibleError: + # A crossover was not possible for them. Do nothing. + pass + else: + crossed_over_incumbents.append( + resolve( + pipeline=self._pipeline, + domain_sampler=crossover_sampler, + environment_values=self._environment_values, + ), + ) + + try: + crossover_sampler = CrossoverByMixingSampler( + predefined_samplings_1=trial_2.config, + predefined_samplings_2=trial_1.config, + prefer_first_probability=0.5, + ) + except CrossoverNotPossibleError: + # A crossover was not possible for them. Do nothing. + pass + else: + crossed_over_incumbents.append( + resolve( + pipeline=self._pipeline, + domain_sampler=crossover_sampler, + environment_values=self._environment_values, + ), + ) + + all_sampled_pipelines = [ + *random_pipelines, + *sometimes_priors_pipelines, + *mutated_incumbents, + *crossed_over_incumbents, + ] + + # Here we can have a model which picks from all the sampled pipelines. + # Currently, we just pick randomly from them. 
+ chosen_pipelines = random.sample(all_sampled_pipelines, k=n_requested) + + if n_prev_trials == 0: + # In this case, always include the prior pipeline. + prior_pipeline = resolve( + pipeline=self._pipeline, + domain_sampler=self._try_always_priors_sampler, + environment_values=self._environment_values, + ) + chosen_pipelines[0] = prior_pipeline + + return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) diff --git a/neps/space/neps_spaces/bracket_optimizer.py b/neps/space/neps_spaces/optimizers/bracket_optimizer.py similarity index 96% rename from neps/space/neps_spaces/bracket_optimizer.py rename to neps/space/neps_spaces/optimizers/bracket_optimizer.py index 1d8cc5f82..ef0f993a5 100644 --- a/neps/space/neps_spaces/bracket_optimizer.py +++ b/neps/space/neps_spaces/optimizers/bracket_optimizer.py @@ -16,6 +16,7 @@ import pandas as pd import neps.optimizers.bracket_optimizer as standard_bracket_optimizer +import neps.space.neps_spaces.sampling from neps.optimizers.optimizer import SampledConfig from neps.optimizers.utils.brackets import PromoteAction, SampleAction from neps.space.neps_spaces import neps_space @@ -143,9 +144,13 @@ def _sample_prior( fidelity_level: Literal["min"] | Literal["max"], ) -> dict[str, Any]: # TODO: [lum] have a CenterSampler as fallback, not Random - _try_always_priors_sampler = neps_space.PriorOrFallbackSampler( - fallback_sampler=neps_space.RandomSampler(predefined_samplings={}), - prior_use_probability=1, + _try_always_priors_sampler = ( + neps.space.neps_spaces.sampling.PriorOrFallbackSampler( + fallback_sampler=neps.space.neps_spaces.sampling.RandomSampler( + predefined_samplings={} + ), + prior_use_probability=1, + ) ) _environment_values = {} @@ -182,7 +187,7 @@ def _convert_to_another_rung( _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, - domain_sampler=neps_space.OnlyPredefinedValuesSampler( + domain_sampler=neps.space.neps_spaces.sampling.OnlyPredefinedValuesSampler( 
predefined_samplings=data.predefined_samplings, ), environment_values=_environment_values, diff --git a/neps/space/neps_spaces/optimizers/priorband.py b/neps/space/neps_spaces/optimizers/priorband.py index 32d8b03f0..58b179d71 100644 --- a/neps/space/neps_spaces/optimizers/priorband.py +++ b/neps/space/neps_spaces/optimizers/priorband.py @@ -13,6 +13,7 @@ import numpy as np +import neps.space.neps_spaces.sampling from neps.optimizers.utils import brackets from neps.space.neps_spaces import neps_space @@ -143,9 +144,13 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: def _sample_prior(self) -> dict[str, Any]: # TODO: [lum] have a CenterSampler as fallback, not Random - _try_always_priors_sampler = neps_space.PriorOrFallbackSampler( - fallback_sampler=neps_space.RandomSampler(predefined_samplings={}), - prior_use_probability=1, + _try_always_priors_sampler = ( + neps.space.neps_spaces.sampling.PriorOrFallbackSampler( + fallback_sampler=neps.space.neps_spaces.sampling.RandomSampler( + predefined_samplings={} + ), + prior_use_probability=1, + ) ) _environment_values = {} @@ -170,7 +175,9 @@ def _sample_random(self) -> dict[str, Any]: _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, - domain_sampler=neps_space.RandomSampler(predefined_samplings={}), + domain_sampler=neps.space.neps_spaces.sampling.RandomSampler( + predefined_samplings={} + ), environment_values=_environment_values, ) @@ -182,7 +189,7 @@ def _mutate_inc(self, inc_config: dict[str, Any]) -> dict[str, Any]: _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, - domain_sampler=neps_space.MutatateUsingCentersSampler( + domain_sampler=neps.space.neps_spaces.sampling.MutatateUsingCentersSampler( predefined_samplings=data.predefined_samplings, n_mutations=max(1, random.randint(1, int(len(inc_config) / 2))), ), diff --git a/neps/space/neps_spaces/sampling.py b/neps/space/neps_spaces/sampling.py new file mode 100644 index 
000000000..64ebdbc86 --- /dev/null +++ b/neps/space/neps_spaces/sampling.py @@ -0,0 +1,401 @@ +"""This module defines various samplers for NEPS spaces, allowing for different sampling +strategies such as predefined values, random sampling, and mutation-based sampling. +""" + +from __future__ import annotations + +import random +from collections.abc import Mapping +from typing import Any, TypeVar, cast + +from neps.space.neps_spaces.neps_space import ( + ConfidenceLevel, + Domain, + DomainSampler, + Pipeline, +) + +T = TypeVar("T") +P = TypeVar("P", bound="Pipeline") + + +class OnlyPredefinedValuesSampler(DomainSampler): + """A sampler that only returns predefined values for a given path. + If the path is not found in the predefined values, it raises a ValueError. + :param predefined_samplings: A mapping of paths to predefined values. + """ + + def __init__( + self, + predefined_samplings: Mapping[str, Any], + ): + """Initialize the sampler with predefined samplings. + :param predefined_samplings: A mapping of paths to predefined values. + :raises ValueError: If predefined_samplings is empty. + """ + self._predefined_samplings = predefined_samplings + + def __call__( + self, + *, + domain_obj: Domain[T], # noqa: ARG002 + current_path: str, + ) -> T: + """Sample a value from the predefined samplings for the given path. + :param domain_obj: The domain object, not used in this sampler. + :param current_path: The path for which to sample a value. + :return: The predefined value for the given path. + :raises ValueError: If the current path is not in the predefined samplings. + """ + if current_path not in self._predefined_samplings: + raise ValueError(f"No predefined value for path: {current_path!r}.") + return cast(T, self._predefined_samplings[current_path]) + + +class RandomSampler(DomainSampler): + """A sampler that randomly samples from a predefined set of values. + If the current path is not in the predefined values, it samples from the domain. 
+ :param predefined_samplings: A mapping of paths to predefined values. + This sampler will use these values if available, otherwise it will sample from the + domain. + """ + + def __init__( + self, + predefined_samplings: Mapping[str, Any], + ): + """Initialize the sampler with predefined samplings. + :param predefined_samplings: A mapping of paths to predefined values. + :raises + ValueError: If predefined_samplings is empty. + """ + self._predefined_samplings = predefined_samplings + + def __call__( + self, + *, + domain_obj: Domain[T], + current_path: str, + ) -> T: + """Sample a value from the predefined samplings or the domain. + :param domain_obj: The domain object from which to sample. + :param current_path: The path for which to sample a value. + :return: A sampled value, either from the predefined samplings or from the + domain. + :raises ValueError: If the current path is not in the predefined samplings and + the domain does not have a prior defined. + """ + if current_path not in self._predefined_samplings: + sampled_value = domain_obj.sample() + else: + sampled_value = cast(T, self._predefined_samplings[current_path]) + return sampled_value + + +class PriorOrFallbackSampler(DomainSampler): + """A sampler that uses a prior value if available, otherwise falls back to another + sampler. + :param fallback_sampler: A DomainSampler to use if the prior is not available. + :param prior_use_probability: The probability of using the prior value when + available. + This should be a float between 0 and 1, where 0 means never use the prior and 1 means + always use it. + :raises ValueError: If the prior_use_probability is not between 0 and 1. + """ + + def __init__( + self, + fallback_sampler: DomainSampler, + prior_use_probability: float, + ): + """Initialize the sampler with a fallback sampler and a prior use probability. + :param fallback_sampler: A DomainSampler to use if the prior is not available. 
+ :param prior_use_probability: The probability of using the prior value when + available. + This should be a float between 0 and 1, where 0 means never use the prior and 1 + means always use it. + :raises ValueError: If the prior_use_probability is not between 0 and 1. + """ + if not 0 <= prior_use_probability <= 1: + raise ValueError( + "The given `prior_use_probability` value is out of range:" + f" {prior_use_probability!r}." + ) + + self._fallback_sampler = fallback_sampler + self._prior_use_probability = prior_use_probability + + def __call__( + self, + *, + domain_obj: Domain[T], + current_path: str, + ) -> T: + """Sample a value from the domain, using the prior if available and according to + the prior use probability. + :param domain_obj: The domain object from which to sample. + :param current_path: The path for which to sample a value. + :return: A sampled value, either from the prior or from the fallback sampler. + :raises ValueError: If the domain does not have a prior defined and the fallback + sampler is not provided. + """ + use_prior = random.choices( + (True, False), + weights=(self._prior_use_probability, 1 - self._prior_use_probability), + k=1, + )[0] + if domain_obj.has_prior and use_prior: + return domain_obj.prior + return self._fallback_sampler( + domain_obj=domain_obj, + current_path=current_path, + ) + + +def _mutate_samplings_to_make_by_forgetting( + samplings_to_make: Mapping[str, Any], + n_forgets: int, +) -> Mapping[str, Any]: + mutated_samplings_to_make = dict(**samplings_to_make) + + samplings_to_delete = random.sample( + list(samplings_to_make.keys()), + k=n_forgets, + ) + + for choice_to_delete in samplings_to_delete: + mutated_samplings_to_make.pop(choice_to_delete) + + return mutated_samplings_to_make + + +class MutateByForgettingSampler(DomainSampler): + """A sampler that mutates predefined samplings by forgetting a certain number of + them. 
It randomly selects a number of predefined samplings to forget and returns a + new sampler that only uses the remaining samplings. + :param predefined_samplings: A mapping of paths to predefined values. + :param n_forgets: The number of predefined samplings to forget. + This should be an integer greater than 0 and less than or equal to the number of + predefined samplings. + :raises ValueError: If n_forgets is not a valid integer or if it exceeds the number + of predefined samplings. + """ + + def __init__( + self, + predefined_samplings: Mapping[str, Any], + n_forgets: int, + ): + """Initialize the sampler with predefined samplings and a number of forgets. + :param predefined_samplings: A mapping of paths to predefined values. + :param n_forgets: The number of predefined samplings to forget. + This should be an integer greater than 0 and less than or equal to the number of + predefined samplings. + :raises ValueError: If n_forgets is not a valid integer or if it exceeds the + number of predefined samplings. + """ + if ( + not isinstance(n_forgets, int) + or n_forgets <= 0 + or n_forgets > len(predefined_samplings) + ): + raise ValueError(f"Invalid value for `n_forgets`: {n_forgets!r}.") + + mutated_samplings_to_make = _mutate_samplings_to_make_by_forgetting( + samplings_to_make=predefined_samplings, + n_forgets=n_forgets, + ) + + self._random_sampler = RandomSampler( + predefined_samplings=mutated_samplings_to_make, + ) + + def __call__( + self, + *, + domain_obj: Domain[T], + current_path: str, + ) -> T: + """Sample a value from the mutated predefined samplings or the domain. + :param domain_obj: The domain object from which to sample. + :param current_path: The path for which to sample a value. + :return: A sampled value, either from the mutated predefined samplings or from + the domain. + :raises ValueError: If the current path is not in the mutated predefined + samplings and the domain does not have a prior defined. 
+ """ + return self._random_sampler(domain_obj=domain_obj, current_path=current_path) + + +class MutatateUsingCentersSampler(DomainSampler): + """A sampler that mutates predefined samplings by forgetting a certain number of them, + but still uses the original values as centers for sampling. + :param predefined_samplings: A mapping of paths to predefined values. + :param n_mutations: The number of predefined samplings to mutate. + This should be an integer greater than 0 and less than or equal to the number of + predefined samplings. + :raises ValueError: If n_mutations is not a valid integer or if it exceeds the number + of predefined samplings. + """ + + def __init__( + self, + predefined_samplings: Mapping[str, Any], + n_mutations: int, + ): + """Initialize the sampler with predefined samplings and a number of mutations. + :param predefined_samplings: A mapping of paths to predefined values. + :param n_mutations: The number of predefined samplings to mutate. + This should be an integer greater than 0 and less than or equal to the number of + predefined samplings. + :raises ValueError: If n_mutations is not a valid integer or if it exceeds + the number of predefined samplings. + """ + if ( + not isinstance(n_mutations, int) + or n_mutations <= 0 + or n_mutations > len(predefined_samplings) + ): + raise ValueError(f"Invalid value for `n_mutations`: {n_mutations!r}.") + + self._kept_samplings_to_make = _mutate_samplings_to_make_by_forgetting( + samplings_to_make=predefined_samplings, + n_forgets=n_mutations, + ) + + # Still remember the original choices. We'll use them as centers later. + self._original_samplings_to_make = predefined_samplings + + def __call__( + self, + *, + domain_obj: Domain[T], + current_path: str, + ) -> T: + """Sample a value from the predefined samplings or the domain, using original + values as centers if the current path is not in the kept samplings. + :param domain_obj: The domain object from which to sample. 
+ :param current_path: The path for which to sample a value. + :return: A sampled value, either from the kept samplings or from the domain, + using the original values as centers if necessary. + :raises ValueError: If the current path is not in the kept samplings and the + domain does not have a prior defined. + """ + if current_path not in self._kept_samplings_to_make: + # For this path we either have forgotten the value or we never had it. + if current_path in self._original_samplings_to_make: + # We had a value for this path originally, use it as a center. + original_value = self._original_samplings_to_make[current_path] + sampled_value = domain_obj.centered_around( + center=original_value, + confidence=ConfidenceLevel.HIGH, + ).sample() + else: + # We never had a value for this path, we can only sample from the domain. + sampled_value = domain_obj.sample() + else: + # For this path we have chosen to keep the original value. + sampled_value = cast(T, self._kept_samplings_to_make[current_path]) + + return sampled_value + + +class CrossoverNotPossibleError(Exception): + """Exception raised when a crossover operation is not possible.""" + + +def _crossover_samplings_to_make_by_mixing( + predefined_samplings_1: Mapping[str, Any], + predefined_samplings_2: Mapping[str, Any], + prefer_first_probability: float, +) -> tuple[bool, Mapping[str, Any]]: + crossed_over_samplings = dict(**predefined_samplings_1) + made_any_crossovers = False + + for path, sampled_value_in_2 in predefined_samplings_2.items(): + if path in crossed_over_samplings: + use_value_from_2 = random.choices( + (False, True), + weights=(prefer_first_probability, 1 - prefer_first_probability), + k=1, + )[0] + if use_value_from_2: + crossed_over_samplings[path] = sampled_value_in_2 + made_any_crossovers = True + else: + crossed_over_samplings[path] = sampled_value_in_2 + + return made_any_crossovers, crossed_over_samplings + + +class CrossoverByMixingSampler(DomainSampler): + """A sampler that performs a 
crossover operation by mixing two sets of predefined + samplings. It combines the predefined samplings from two sources, allowing for a + probability-based + selection of values from either source. + :param predefined_samplings_1: The first set of predefined samplings. + :param predefined_samplings_2: The second set of predefined samplings. + :param prefer_first_probability: The probability of preferring values from the first + set over the second set when both have values for the same path. + This should be a float between 0 and 1, where 0 means always prefer the second set + and 1 means always prefer the first set. + :raises ValueError: If prefer_first_probability is not between 0 and 1. + :raises CrossoverNotPossibleError: If no crossovers were made between the two sets + of predefined samplings. + """ + + def __init__( + self, + predefined_samplings_1: Mapping[str, Any], + predefined_samplings_2: Mapping[str, Any], + prefer_first_probability: float, + ): + """Initialize the sampler with two sets of predefined samplings and a preference + probability for the first set. + :param predefined_samplings_1: The first set of predefined samplings. + :param predefined_samplings_2: The second set of predefined samplings. + :param prefer_first_probability: The probability of preferring values from the + first set over the second set when both have values for the same path. + This should be a float between 0 and 1, where 0 means always prefer the second + set and 1 means always prefer the first set. + :raises ValueError: If prefer_first_probability is not between 0 and 1. + """ + if not isinstance(prefer_first_probability, float) or not ( + 0 <= prefer_first_probability <= 1 + ): + raise ValueError( + "Invalid value for `prefer_first_probability`:" + f" {prefer_first_probability!r}." 
+ ) + + ( + made_any_crossovers, + crossed_over_samplings_to_make, + ) = _crossover_samplings_to_make_by_mixing( + predefined_samplings_1=predefined_samplings_1, + predefined_samplings_2=predefined_samplings_2, + prefer_first_probability=prefer_first_probability, + ) + + if not made_any_crossovers: + raise CrossoverNotPossibleError("No crossovers were made.") + + self._random_sampler = RandomSampler( + predefined_samplings=crossed_over_samplings_to_make, + ) + + def __call__( + self, + *, + domain_obj: Domain[T], + current_path: str, + ) -> T: + """Sample a value from the crossed-over predefined samplings or the domain. + :param domain_obj: The domain object from which to sample. + :param current_path: The path for which to sample a value. + :return: A sampled value, either from the crossed-over predefined samplings or + from the domain. + :raises ValueError: If the current path is not in the crossed-over predefined + samplings and the domain does not have a prior defined. + """ + return self._random_sampler(domain_obj=domain_obj, current_path=current_path) diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index 70bd04282..af154c78a 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -5,6 +5,7 @@ import pytest import neps +import neps.space.neps_spaces.optimizers.algorithms from neps.space.neps_spaces import neps_space @@ -143,7 +144,10 @@ class DemoHyperparameterComplexSpace(neps_space.Pipeline): @pytest.mark.parametrize( "optimizer", - [neps_space.RandomSearch, neps_space.ComplexRandomSearch], + [ + neps.space.neps_spaces.optimizers.algorithms.RandomSearch, + neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, + ], ) def test_hyperparameter_demo(optimizer): pipeline_space = DemoHyperparameterSpace() @@ -166,7 +170,10 @@ def test_hyperparameter_demo(optimizer): @pytest.mark.parametrize( "optimizer", - [neps_space.RandomSearch, 
neps_space.ComplexRandomSearch], + [ + neps.space.neps_spaces.optimizers.algorithms.RandomSearch, + neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, + ], ) def test_hyperparameter_with_fidelity_demo(optimizer): pipeline_space = DemoHyperparameterWithFidelitySpace() @@ -189,7 +196,10 @@ def test_hyperparameter_with_fidelity_demo(optimizer): @pytest.mark.parametrize( "optimizer", - [neps_space.RandomSearch, neps_space.ComplexRandomSearch], + [ + neps.space.neps_spaces.optimizers.algorithms.RandomSearch, + neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, + ], ) def test_hyperparameter_complex_demo(optimizer): pipeline_space = DemoHyperparameterComplexSpace() @@ -314,7 +324,10 @@ class DemoOperationSpace(neps_space.Pipeline): @pytest.mark.parametrize( "optimizer", - [neps_space.RandomSearch, neps_space.ComplexRandomSearch], + [ + neps.space.neps_spaces.optimizers.algorithms.RandomSearch, + neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, + ], ) def test_operation_demo(optimizer): pipeline_space = DemoOperationSpace() diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index e3452146e..686f4da9a 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -7,7 +7,8 @@ import neps import neps.optimizers.algorithms as old_algorithms -import neps.space.neps_spaces.bracket_optimizer as new_bracket_optimizer +import neps.space.neps_spaces.optimizers.algorithms +import neps.space.neps_spaces.optimizers.bracket_optimizer as new_bracket_optimizer from neps.space.neps_spaces import neps_space _COSTS = {} @@ -88,11 +89,11 @@ class DemoHyperparameterWithFidelitySpace(neps_space.Pipeline): ("optimizer", "optimizer_name"), [ ( - neps_space.RandomSearch, + neps.space.neps_spaces.optimizers.algorithms.RandomSearch, "new__RandomSearch", ), ( - 
neps_space.ComplexRandomSearch, + neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, "new__ComplexRandomSearch", ), ( diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index dea908e0d..ec462a9e6 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -7,7 +7,8 @@ import neps import neps.optimizers.algorithms as old_algorithms -import neps.space.neps_spaces.bracket_optimizer as new_bracket_optimizer +import neps.space.neps_spaces.optimizers.algorithms +import neps.space.neps_spaces.optimizers.bracket_optimizer as new_bracket_optimizer from neps.space.neps_spaces import neps_space @@ -75,11 +76,11 @@ class DemoHyperparameterWithFidelitySpace(neps_space.Pipeline): ("optimizer", "optimizer_name"), [ ( - neps_space.RandomSearch, + neps.space.neps_spaces.optimizers.algorithms.RandomSearch, "new__RandomSearch", ), ( - neps_space.ComplexRandomSearch, + neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, "new__ComplexRandomSearch", ), ( diff --git a/tests/test_neps_space/test_search_space__fidelity.py b/tests/test_neps_space/test_search_space__fidelity.py index 8b8d5a151..5eaa62ffb 100644 --- a/tests/test_neps_space/test_search_space__fidelity.py +++ b/tests/test_neps_space/test_search_space__fidelity.py @@ -4,6 +4,7 @@ import pytest +import neps.space.neps_spaces.sampling from neps.space.neps_spaces import neps_space @@ -60,7 +61,8 @@ def test_fidelity_resolution_raises_when_resolved_with_invalid_value(): with pytest.raises( ValueError, match=re.escape( - "Value for fidelity with name 'fidelity_integer1' is outside its allowed range [1, 1000]. Received: -10." + "Value for fidelity with name 'fidelity_integer1' is outside its allowed" + " range [1, 1000]. Received: -10." 
), ): neps_space.resolve( @@ -98,7 +100,7 @@ def test_fidelity_resolution_with_context_works(): # with a valid value for it in the environment. resolved_pipeline, resolution_context = neps_space.resolve( pipeline=pipeline, - domain_sampler=neps_space.OnlyPredefinedValuesSampler( + domain_sampler=neps.space.neps_spaces.sampling.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), environment_values=environment_values, diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index 686343fdb..15a979f4a 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -2,6 +2,7 @@ import pytest +import neps.space.neps_spaces.sampling from neps.space.neps_spaces import config_string, neps_space @@ -278,7 +279,7 @@ def test_resolve_context(): resolved_pipeline, resolution_context = neps_space.resolve( pipeline, - domain_sampler=neps_space.OnlyPredefinedValuesSampler( + domain_sampler=neps.space.neps_spaces.sampling.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), ) @@ -365,7 +366,7 @@ def test_resolve_context_alt(): resolved_pipeline, resolution_context = neps_space.resolve( pipeline, - domain_sampler=neps_space.OnlyPredefinedValuesSampler( + domain_sampler=neps.space.neps_spaces.sampling.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), ) diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index 5d9d61f4c..98bc58c7c 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -2,6 +2,7 @@ import pytest +import neps.space.neps_spaces.sampling from neps.space.neps_spaces import config_string, neps_space @@ -344,7 +345,7 @@ def test_hnas_like_context(): resolved_pipeline, resolution_context = neps_space.resolve( pipeline=pipeline, - 
domain_sampler=neps_space.OnlyPredefinedValuesSampler( + domain_sampler=neps.space.neps_spaces.sampling.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), ) diff --git a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index 13b62a5e1..2cc59a23b 100644 --- a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -2,6 +2,7 @@ import pytest +import neps.space.neps_spaces.sampling from neps.space.neps_spaces import neps_space @@ -323,15 +324,45 @@ def test_shared_complex(): @pytest.mark.repeat(50) def test_shared_complex_string(): possible_cell_config_strings = { - "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (avg_pool) (avg_pool) (avg_pool) (avg_pool) (avg_pool))", - "(cell {'float_hp': 0.5, 'int_hp': 2} (zero) (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3 (relu) (conv3x3) (batch)))", - "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch)) (avg_pool) (sequential3 (relu) (conv3x3) (batch)) (avg_pool) (sequential3 (relu) (conv3x3) (batch)) (avg_pool))", + ( + "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (avg_pool) (avg_pool)" + " (avg_pool) (avg_pool) (avg_pool))" + ), + ( + "(cell {'float_hp': 0.5, 'int_hp': 2} (zero) (sequential3 (relu) (conv3x3)" + " (batch)) (zero) (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3" + " (relu) (conv3x3) (batch)))" + ), + ( + "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch))" + " (avg_pool) (sequential3 (relu) (conv3x3) (batch)) (avg_pool) (sequential3" + " (relu) (conv3x3) (batch)) (avg_pool))" + ), "(cell {'float_hp': 0.5, 'int_hp': 2} (zero) (zero) (zero) (zero) (zero) (zero))", - "(cell {'float_hp': 0.5, 'int_hp': 2} (zero) (avg_pool) (zero) (avg_pool) (zero) (avg_pool))", - "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 
(relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)))", - "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (zero) (avg_pool) (zero) (avg_pool) (zero))", - "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3 (relu) (conv3x3) (batch)) (zero))", - "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (sequential3 (relu) (conv3x3) (batch)) (avg_pool) (sequential3 (relu) (conv3x3) (batch)) (avg_pool) (sequential3 (relu) (conv3x3) (batch)))", + ( + "(cell {'float_hp': 0.5, 'int_hp': 2} (zero) (avg_pool) (zero) (avg_pool)" + " (zero) (avg_pool))" + ), + ( + "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch))" + " (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3)" + " (batch)) (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu)" + " (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)))" + ), + ( + "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (zero) (avg_pool) (zero)" + " (avg_pool) (zero))" + ), + ( + "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch))" + " (zero) (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3 (relu)" + " (conv3x3) (batch)) (zero))" + ), + ( + "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (sequential3 (relu)" + " (conv3x3) (batch)) (avg_pool) (sequential3 (relu) (conv3x3) (batch))" + " (avg_pool) (sequential3 (relu) (conv3x3) (batch)))" + ), } pipeline = CellPipeline() @@ -361,7 +392,7 @@ def test_shared_complex_context(): resolved_pipeline_first, _resolution_context_first = neps_space.resolve( pipeline=pipeline, - domain_sampler=neps_space.OnlyPredefinedValuesSampler( + domain_sampler=neps.space.neps_spaces.sampling.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), ) @@ -375,7 +406,7 @@ 
def test_shared_complex_context(): resolved_pipeline_second, _resolution_context_second = neps_space.resolve( pipeline=pipeline, - domain_sampler=neps_space.OnlyPredefinedValuesSampler( + domain_sampler=neps.space.neps_spaces.sampling.OnlyPredefinedValuesSampler( predefined_samplings=samplings_to_make, ), ) @@ -390,7 +421,10 @@ def test_shared_complex_context(): # the second resolution should give us a new object assert resolved_pipeline_second is not resolved_pipeline_first - expected_config_string: str = "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (zero) (avg_pool) (zero) (avg_pool) (zero))" + expected_config_string: str = ( + "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (zero) (avg_pool) (zero)" + " (avg_pool) (zero))" + ) # however, their final results should be the same thing assert ( From 8c7e4ab51b7f1a3f2ac0ad87e43b752d84ad626a Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 2 Jul 2025 02:20:18 +0200 Subject: [PATCH 011/156] Refactor NEPS space tests to use updated parameters module - Updated import statements to use `neps.space.neps_spaces.parameters` for all relevant classes and functions in test files. - Refactored test classes and methods to replace direct usage of `neps_space` with the new parameters module, ensuring consistency across the test suite. - Adjusted the instantiation of various operations and categorical choices to align with the new structure. - Ensured that all references to operations, floats, integers, and categorical choices are correctly sourced from the updated parameters module. 
--- neps/api.py | 2 +- neps/optimizers/__init__.py | 2 +- neps/optimizers/algorithms.py | 2 +- neps/space/neps_spaces/neps_space.py | 874 +----------------- .../neps_spaces/optimizers/algorithms.py | 2 +- .../optimizers/bracket_optimizer.py | 7 +- .../space/neps_spaces/optimizers/priorband.py | 3 +- neps/space/neps_spaces/parameters.py | 831 +++++++++++++++++ neps/space/neps_spaces/sampling.py | 24 +- neps/space/parsing.py | 8 +- .../test_neps_space/test_domain__centering.py | 68 +- .../test_neps_space/test_neps_integration.py | 103 ++- ...st_neps_integration_priorband__max_cost.py | 19 +- ...t_neps_integration_priorband__max_evals.py | 19 +- .../test_search_space__fidelity.py | 17 +- .../test_search_space__grammar_like.py | 173 ++-- .../test_search_space__hnas_like.py | 175 ++-- .../test_search_space__nos_like.py | 100 +- .../test_search_space__recursion.py | 15 +- .../test_search_space__resampled.py | 115 +-- .../test_search_space__reuse_arch_elements.py | 105 ++- tests/test_neps_space/utils.py | 8 +- 22 files changed, 1379 insertions(+), 1293 deletions(-) create mode 100644 neps/space/neps_spaces/parameters.py diff --git a/neps/api.py b/neps/api.py index 955d3a09e..396dca0d6 100644 --- a/neps/api.py +++ b/neps/api.py @@ -19,7 +19,7 @@ from neps.optimizers.algorithms import CustomOptimizer from neps.space import Parameter, SearchSpace - from neps.space.neps_spaces.neps_space import Pipeline + from neps.space.neps_spaces.parameters import Pipeline from neps.state import EvaluatePipelineReturn logger = logging.getLogger(__name__) diff --git a/neps/optimizers/__init__.py b/neps/optimizers/__init__.py index 3c747104d..6ad255346 100644 --- a/neps/optimizers/__init__.py +++ b/neps/optimizers/__init__.py @@ -10,7 +10,7 @@ determine_optimizer_automatically, ) from neps.optimizers.optimizer import AskFunction, OptimizerInfo -from neps.space.neps_spaces.neps_space import Pipeline +from neps.space.neps_spaces.parameters import Pipeline from neps.utils.common import 
extract_keyword_defaults if TYPE_CHECKING: diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 3d90e8bb0..8f24618a8 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -43,7 +43,7 @@ from neps.optimizers.utils.brackets import Bracket from neps.space import SearchSpace - from neps.space.neps_spaces.neps_space import Pipeline + from neps.space.neps_spaces.parameters import Pipeline logger = logging.getLogger(__name__) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 7f99a2329..7cbf38da5 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -1,879 +1,39 @@ -"""This module defines various classes and protocols for representing and manipulating -search spaces in NePS (Neural Parameter Search). It includes definitions for domains, -pipelines, operations, and fidelity, as well as utilities for sampling and resolving -search spaces. +"""This module provides functionality for resolving NePS spaces, including sampling from +domains, resolving pipelines, and handling various resolvable objects. 
""" from __future__ import annotations -import abc import contextlib import dataclasses -import enum import functools -import math -import random -from collections.abc import Callable, Generator, Mapping, Sequence +from collections.abc import Callable, Generator, Mapping from typing import ( Any, - Generic, - Protocol, TypeVar, cast, - runtime_checkable, ) from neps.optimizers import optimizer from neps.space.neps_spaces import config_string -from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler, RandomSampler +from neps.space.neps_spaces.parameters import ( + Categorical, + Domain, + Fidelity, + Operation, + Pipeline, + Resampled, + Resolvable, +) +from neps.space.neps_spaces.sampling import ( + DomainSampler, + OnlyPredefinedValuesSampler, + RandomSampler, +) -T = TypeVar("T") P = TypeVar("P", bound="Pipeline") -# ------------------------------------------------- - - -class _Unset: - pass - - -_UNSET = _Unset() - - -# ------------------------------------------------- - - -@runtime_checkable -class Resolvable(Protocol): - """A protocol for objects that can be resolved into attributes.""" - - def get_attrs(self) -> Mapping[str, Any]: - """Get the attributes of the resolvable object as a mapping.""" - raise NotImplementedError() - - def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: - """Create a new resolvable object from the given attributes.""" - raise NotImplementedError() - - -def resolvable_is_fully_resolved(resolvable: Resolvable) -> bool: - """Check if a resolvable object is fully resolved. - A resolvable object is considered fully resolved if all its attributes are either - not instances of Resolvable or are themselves fully resolved. 
- """ - attr_objects = resolvable.get_attrs().values() - return all( - not isinstance(obj, Resolvable) or resolvable_is_fully_resolved(obj) - for obj in attr_objects - ) - - -@runtime_checkable -class DomainSampler(Protocol): - """A protocol for domain samplers that can sample from a given domain.""" - - def __call__( - self, - *, - domain_obj: Domain[T], - current_path: str, - ) -> T: - """Sample a value from the given domain. - :param domain_obj: The domain object to sample from. - :param current_path: The current path in the resolution context. - :return: A sampled value of type T from the domain. - :raises NotImplementedError: If the method is not implemented. - """ - raise NotImplementedError() - - -# ------------------------------------------------- - - -class Pipeline(Resolvable): - """A class representing a pipeline in NePS spaces. - It contains attributes that can be resolved into a configuration string, - and it can be used to sample configurations based on defined domains. - """ - - @property - def fidelity_attrs(self) -> Mapping[str, Fidelity]: - """Get the fidelity attributes of the pipeline. Fidelity attributes are special - attributes that represent the fidelity of the pipeline. - :return: A mapping of fidelity attribute names to Fidelity objects. - """ - return {k: v for k, v in self.get_attrs().items() if isinstance(v, Fidelity)} - - def get_attrs(self) -> Mapping[str, Any]: - """Get the attributes of the pipeline as a mapping. - This method collects all attributes of the pipeline class and instance, - excluding private attributes and methods, and returns them as a dictionary. - :return: A mapping of attribute names to their values. 
- """ - attrs = {} - - for attr_name, attr_value in vars(self.__class__).items(): - if attr_name.startswith("_") or callable(attr_value): - continue - attrs[attr_name] = attr_value - - for attr_name, attr_value in vars(self).items(): - if attr_name.startswith("_") or callable(attr_value): - continue - attrs[attr_name] = attr_value - - properties_to_ignore = ("fidelity_attrs",) - for property_to_ignore in properties_to_ignore: - attrs.pop(property_to_ignore, None) - - return attrs - - def from_attrs(self, attrs: Mapping[str, Any]) -> Pipeline: - """Create a new Pipeline instance from the given attributes. - :param attrs: A mapping of attribute names to their values. - :return: A new Pipeline instance with the specified attributes. - :raises ValueError: If the attributes do not match the pipeline's expected - structure. - """ - new_pipeline = Pipeline() - for name, value in attrs.items(): - setattr(new_pipeline, name, value) - return new_pipeline - - -class ConfidenceLevel(enum.Enum): - """Enum representing confidence levels for sampling.""" - - LOW = "low" - MEDIUM = "medium" - HIGH = "high" - - -class Domain(Resolvable, abc.ABC, Generic[T]): - """An abstract base class representing a domain in NePS spaces. - It defines the properties and methods that all domains must implement, - such as min and max values, sampling, and centered domains. - """ - - @property - @abc.abstractmethod - def min_value(self) -> T: - """Get the minimum value of the domain.""" - raise NotImplementedError() - - @property - @abc.abstractmethod - def max_value(self) -> T: - """Get the maximum value of the domain.""" - raise NotImplementedError() - - @property - @abc.abstractmethod - def has_prior(self) -> bool: - """Check if the domain has a prior defined.""" - raise NotImplementedError() - - @property - @abc.abstractmethod - def prior(self) -> T: - """Get the prior value of the domain. - Raises ValueError if the domain has no prior defined. 
- """ - raise NotImplementedError() - - @property - @abc.abstractmethod - def prior_confidence(self) -> ConfidenceLevel: - """Get the confidence level of the prior. - Raises ValueError if the domain has no prior defined. - """ - raise NotImplementedError() - - @property - @abc.abstractmethod - def range_compatibility_identifier(self) -> str: - """Get a string identifier for the range compatibility of the domain. - This identifier is used to check if two domains are compatible based on their - ranges. - """ - raise NotImplementedError() - - @abc.abstractmethod - def sample(self) -> T: - """Sample a value from the domain. - Returns a value of type T that is within the domain's range. - """ - raise NotImplementedError() - - @abc.abstractmethod - def centered_around( - self, - center: T, - confidence: ConfidenceLevel, - ) -> Domain[T]: - """Create a new domain centered around a given value with a specified confidence - level. - :param center: The value around which to center the new domain. - :param confidence: The confidence level for the new domain. - :return: A new Domain instance that is centered around the specified value. - :raises ValueError: If the center value is not within the domain's range. - """ - raise NotImplementedError() - - def get_attrs(self) -> Mapping[str, Any]: - """Get the attributes of the domain as a mapping. - This method collects all attributes of the domain class and instance, - excluding private attributes and methods, and returns them as a dictionary. - :return: A mapping of attribute names to their values. - """ - return {k.lstrip("_"): v for k, v in vars(self).items()} - - def from_attrs(self, attrs: Mapping[str, Any]) -> Domain[T]: - """Create a new Domain instance from the given attributes. - :param attrs: A mapping of attribute names to their values. - :return: A new Domain instance with the specified attributes. - :raises ValueError: If the attributes do not match the domain's expected - structure. 
- """ - return type(self)(**attrs) - - -def _calculate_new_domain_bounds( - number_type: type[int] | type[float], - min_value: int | float, - max_value: int | float, - center: int | float, - confidence: ConfidenceLevel, -) -> tuple[int, int] | tuple[float, float]: - if center < min_value or center > max_value: - raise ValueError( - f"Center value {center!r} must be within domain range [{min_value!r}," - f" {max_value!r}]" - ) - - # Determine a chunk size by splitting the domain range into a fixed number of chunks. - # Then use the confidence level to decide how many chunks to include - # around the given center (on each side). - - number_of_chunks = 10.0 - chunk_size = (max_value - min_value) / number_of_chunks - - # The numbers refer to how many segments to have on each side of the center. - # TODO: [lum] we need to make sure that in the end the range does not just have the - # center, but at least a little bit more around it too. - confidence_to_number_of_chunks_on_each_side = { - ConfidenceLevel.HIGH: 1.0, - ConfidenceLevel.MEDIUM: 2.5, - ConfidenceLevel.LOW: 4.0, - } - - chunk_multiplier = confidence_to_number_of_chunks_on_each_side[confidence] - interval_radius = chunk_size * chunk_multiplier - - if number_type is int: - # In this case we need to use ceil/floor so that we end up with ints. - new_min = max(min_value, math.floor(center - interval_radius)) - new_max = min(max_value, math.ceil(center + interval_radius)) - elif number_type is float: - new_min = max(min_value, center - interval_radius) - new_max = min(max_value, center + interval_radius) - else: - raise ValueError(f"Unsupported number type {number_type!r}.") - - return new_min, new_max - - -class Categorical(Domain[int], Generic[T]): - """A domain representing a categorical choice from a set of options. - It allows for sampling from a predefined set of choices and can be centered around - a specific choice with a given confidence level. - :param choices: A tuple of choices or a Domain of choices. 
- :param prior_index: The index of the prior choice in the choices tuple. - :param prior_confidence: The confidence level of the prior choice. - """ - - def __init__( - self, - choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T], - prior_index: int | Domain[int] | _Unset = _UNSET, - prior_confidence: ConfidenceLevel | _Unset = _UNSET, - ): - """Initialize the Categorical domain with choices and optional prior. - :param choices: A tuple of choices or a Domain of choices. - :param prior_index: The index of the prior choice in the choices tuple. - :param prior_confidence: The confidence level of the prior choice. - :raises ValueError: If the choices are empty or prior_index is out of bounds. - """ - self._choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T] - if isinstance(choices, Sequence): - self._choices = tuple(choice for choice in choices) - else: - self._choices = choices - self._prior_index = prior_index - self._prior_confidence = prior_confidence - - @property - def min_value(self) -> int: - """Get the minimum value of the categorical domain. - :return: The minimum index of the choices, which is always 0. - """ - return 0 - - @property - def max_value(self) -> int: - """Get the maximum value of the categorical domain. - :return: The maximum index of the choices, which is the length of choices minus 1. - """ - return max(len(cast(tuple, self._choices)) - 1, 0) - - @property - def choices(self) -> tuple[T | Domain[T] | Resolvable, ...] | Domain[T]: - """Get the choices available in the categorical domain. - :return: A tuple of choices or a Domain of choices. - """ - return self._choices - - @property - def has_prior(self) -> bool: - """Check if the categorical domain has a prior defined. - :return: True if the prior index and confidence are set, False otherwise. 
- """ - return self._prior_index is not _UNSET and self._prior_confidence is not _UNSET - - @property - def prior(self) -> int: - """Get the prior index of the categorical domain. - :return: The index of the prior choice in the choices tuple. - :raises ValueError: If the domain has no prior defined. - """ - if not self.has_prior: - raise ValueError("Domain has no prior defined.") - return int(cast(int, self._prior_index)) - - @property - def prior_confidence(self) -> ConfidenceLevel: - """Get the confidence level of the prior choice. - :return: The confidence level of the prior choice. - :raises ValueError: If the domain has no prior defined. - """ - if not self.has_prior: - raise ValueError("Domain has no prior defined.") - return cast(ConfidenceLevel, self._prior_confidence) - - @property - def range_compatibility_identifier(self) -> str: - """Get a string identifier for the range compatibility of the categorical domain. - :return: A string representation of the number of choices in the domain. - """ - return f"{len(cast(tuple, self._choices))}" - - def sample(self) -> int: - """Sample a random index from the categorical choices. - :return: A randomly selected index from the choices tuple. - :raises ValueError: If the choices are empty. - """ - return int(random.randint(0, len(cast(tuple[T], self._choices)) - 1)) - - def centered_around( - self, - center: int, - confidence: ConfidenceLevel, - ) -> Categorical: - """Create a new categorical domain centered around a specific choice index. - :param center: The index of the choice around which to center the new domain. - :param confidence: The confidence level for the new domain. - :return: A new Categorical instance with a range centered around the specified - choice index. - :raises ValueError: If the center index is out of bounds of the choices. 
- """ - new_min, new_max = cast( - tuple[int, int], - _calculate_new_domain_bounds( - number_type=int, - min_value=self.min_value, - max_value=self.max_value, - center=center, - confidence=confidence, - ), - ) - new_choices = cast(tuple, self._choices)[new_min : new_max + 1] - return Categorical( - choices=new_choices, - prior_index=new_choices.index(cast(tuple, self._choices)[center]), - prior_confidence=confidence, - ) - - -class Float(Domain[float]): - """A domain representing a continuous range of floating-point values. - It allows for sampling from a range defined by minimum and maximum values, - and can be centered around a specific value with a given confidence level. - :param min_value: The minimum value of the domain. - :param max_value: The maximum value of the domain. - :param log: Whether to sample values on a logarithmic scale. - :param prior: The prior value for the domain, if any. - :param prior_confidence: The confidence level of the prior value. - """ - - def __init__( - self, - min_value: float, - max_value: float, - log: bool = False, # noqa: FBT001, FBT002 - prior: float | _Unset = _UNSET, - prior_confidence: ConfidenceLevel | _Unset = _UNSET, - ): - """Initialize the Float domain with min and max values, and optional prior. - :param min_value: The minimum value of the domain. - :param max_value: The maximum value of the domain. - :param log: Whether to sample values on a logarithmic scale. - :param prior: The prior value for the domain, if any. - :param prior_confidence: The confidence level of the prior value. - :raises ValueError: If min_value is greater than max_value. - """ - self._min_value = min_value - self._max_value = max_value - self._log = log - self._prior = prior - self._prior_confidence = prior_confidence - - @property - def min_value(self) -> float: - """Get the minimum value of the floating-point domain. - :return: The minimum value of the domain. - :raises ValueError: If min_value is greater than max_value. 
- """ - return self._min_value - - @property - def max_value(self) -> float: - """Get the maximum value of the floating-point domain. - :return: The maximum value of the domain. - :raises ValueError: If min_value is greater than max_value. - """ - return self._max_value - - @property - def has_prior(self) -> bool: - """Check if the floating-point domain has a prior defined. - :return: True if the prior and prior confidence are set, False otherwise. - """ - return self._prior is not _UNSET and self._prior_confidence is not _UNSET - - @property - def prior(self) -> float: - """Get the prior value of the floating-point domain. - :return: The prior value of the domain. - :raises ValueError: If the domain has no prior defined. - """ - if not self.has_prior: - raise ValueError("Domain has no prior defined.") - return float(cast(float, self._prior)) - - @property - def prior_confidence(self) -> ConfidenceLevel: - """Get the confidence level of the prior value. - :return: The confidence level of the prior value. - :raises ValueError: If the domain has no prior defined. - """ - if not self.has_prior: - raise ValueError("Domain has no prior defined.") - return cast(ConfidenceLevel, self._prior_confidence) - - @property - def range_compatibility_identifier(self) -> str: - """Get a string identifier for the range compatibility of the floating-point - domain. - :return: A string representation of the minimum and maximum values, and whether - the domain is logarithmic. - """ - return f"{self._min_value}_{self._max_value}_{self._log}" - - def sample(self) -> float: - """Sample a random floating-point value from the domain. - :return: A randomly selected floating-point value within the domain's range. - :raises ValueError: If min_value is greater than max_value. 
- """ - if self._log: - log_min = math.log(self._min_value) - log_max = math.log(self._max_value) - return float(math.exp(random.uniform(log_min, log_max))) - return float(random.uniform(self._min_value, self._max_value)) - - def centered_around( - self, - center: float, - confidence: ConfidenceLevel, - ) -> Float: - """Create a new floating-point domain centered around a specific value. - :param center: The value around which to center the new domain. - :param confidence: The confidence level for the new domain. - :return: A new Float instance that is centered around the specified value. - :raises ValueError: If the center value is not within the domain's range. - """ - new_min, new_max = _calculate_new_domain_bounds( - number_type=float, - min_value=self.min_value, - max_value=self.max_value, - center=center, - confidence=confidence, - ) - return Float( - min_value=new_min, - max_value=new_max, - log=self._log, - prior=center, - prior_confidence=confidence, - ) - - -class Integer(Domain[int]): - """A domain representing a range of integer values. - It allows for sampling from a range defined by minimum and maximum values, - and can be centered around a specific value with a given confidence level. - :param min_value: The minimum value of the domain. - :param max_value: The maximum value of the domain. - :param log: Whether to sample values on a logarithmic scale. - :param prior: The prior value for the domain, if any. - :param prior_confidence: The confidence level of the prior value. - """ - - def __init__( - self, - min_value: int, - max_value: int, - log: bool = False, # noqa: FBT001, FBT002 - prior: int | _Unset = _UNSET, - prior_confidence: ConfidenceLevel | _Unset = _UNSET, - ): - """Initialize the Integer domain with min and max values, and optional prior. - :param min_value: The minimum value of the domain. - :param max_value: The maximum value of the domain. - :param log: Whether to sample values on a logarithmic scale. 
- :param prior: The prior value for the domain, if any. - :param prior_confidence: The confidence level of the prior value. - :raises ValueError: If min_value is greater than max_value. - """ - self._min_value = min_value - self._max_value = max_value - self._log = log - self._prior = prior - self._prior_confidence = prior_confidence - - @property - def min_value(self) -> int: - """Get the minimum value of the integer domain. - :return: The minimum value of the domain. - :raises ValueError: If min_value is greater than max_value. - """ - return self._min_value - - @property - def max_value(self) -> int: - """Get the maximum value of the integer domain. - :return: The maximum value of the domain. - :raises ValueError: If min_value is greater than max_value. - """ - return self._max_value - - @property - def has_prior(self) -> bool: - """Check if the integer domain has a prior defined. - :return: True if the prior and prior confidence are set, False otherwise. - """ - return self._prior is not _UNSET and self._prior_confidence is not _UNSET - - @property - def prior(self) -> int: - """Get the prior value of the integer domain. - :return: The prior value of the domain. - :raises ValueError: If the domain has no prior defined. - """ - if not self.has_prior: - raise ValueError("Domain has no prior defined.") - return int(cast(int, self._prior)) - - @property - def prior_confidence(self) -> ConfidenceLevel: - """Get the confidence level of the prior value. - :return: The confidence level of the prior value. - :raises ValueError: If the domain has no prior defined. - """ - if not self.has_prior: - raise ValueError("Domain has no prior defined.") - return cast(ConfidenceLevel, self._prior_confidence) - - @property - def range_compatibility_identifier(self) -> str: - """Get a string identifier for the range compatibility of the integer domain. - :return: A string representation of the minimum and maximum values, and whether - the domain is logarithmic. 
- """ - return f"{self._min_value}_{self._max_value}_{self._log}" - - def sample(self) -> int: - """Sample a random integer value from the domain. - :return: A randomly selected integer value within the domain's range. - :raises NotImplementedError: If the domain is set to sample on a logarithmic - scale, as this is not implemented yet. - """ - if self._log: - raise NotImplementedError("TODO.") - return int(random.randint(self._min_value, self._max_value)) - - def centered_around( - self, - center: int, - confidence: ConfidenceLevel, - ) -> Integer: - """Create a new integer domain centered around a specific value. - :param center: The value around which to center the new domain. - :param confidence: The confidence level for the new domain. - :return: A new Integer instance that is centered around the specified value. - :raises ValueError: If the center value is not within the domain's range. - """ - new_min, new_max = cast( - tuple[int, int], - _calculate_new_domain_bounds( - number_type=int, - min_value=self.min_value, - max_value=self.max_value, - center=center, - confidence=confidence, - ), - ) - return Integer( - min_value=new_min, - max_value=new_max, - log=self._log, - prior=center, - prior_confidence=confidence, - ) - - -class Operation(Resolvable): - """A class representing an operation in a NePS space. - It encapsulates an operator (a callable or a string), arguments, and keyword - arguments. - The operator can be a function or a string representing a function name. - :param operator: The operator to be used in the operation, can be a callable or a - string. - :param args: A sequence of arguments to be passed to the operator. - :param kwargs: A mapping of keyword arguments to be passed to the operator. - """ - - def __init__( - self, - operator: Callable | str, - args: Sequence[Any] | Resolvable | None = None, - kwargs: Mapping[str, Any] | Resolvable | None = None, - ): - """Initialize the Operation with an operator, arguments, and keyword arguments. 
- :param operator: The operator to be used in the operation, can be a callable or a - string. - :param args: A sequence of arguments to be passed to the operator. - :param kwargs: A mapping of keyword arguments to be passed to the operator. - :raises ValueError: If the operator is not callable or a string. - """ - self._operator = operator - - self._args: tuple[Any, ...] | Resolvable - if not isinstance(args, Resolvable): - self._args = tuple(args) if args else () - else: - self._args = args - - self._kwargs: Mapping[str, Any] | Resolvable - if not isinstance(kwargs, Resolvable): - self._kwargs = kwargs if kwargs else {} - else: - self._kwargs = kwargs - - @property - def operator(self) -> Callable | str: - """Get the operator of the operation. - :return: The operator, which can be a callable or a string. - :raises ValueError: If the operator is not callable or a string. - """ - return self._operator - - @property - def args(self) -> tuple[Any, ...]: - """Get the arguments of the operation. - :return: A tuple of arguments to be passed to the operator. - :raises ValueError: If the args are not a tuple or Resolvable. - """ - return cast(tuple[Any, ...], self._args) - - @property - def kwargs(self) -> Mapping[str, Any]: - """Get the keyword arguments of the operation. - :return: A mapping of keyword arguments to be passed to the operator. - :raises ValueError: If the kwargs are not a mapping or Resolvable. - """ - return cast(Mapping[str, Any], self._kwargs) - - def get_attrs(self) -> Mapping[str, Any]: - """Get the attributes of the operation as a mapping. - This method collects all attributes of the operation class and instance, - excluding private attributes and methods, and returns them as a dictionary. - :return: A mapping of attribute names to their values. - """ - # TODO: [lum] simplify this. We know the fields. Maybe other places too. 
- result: dict[str, Any] = {} - for name, value in vars(self).items(): - stripped_name = name.lstrip("_") - if isinstance(value, dict): - for k, v in value.items(): - # Multiple {{}} needed to escape surrounding '{' and '}'. - result[f"{stripped_name}{{{k}}}"] = v - elif isinstance(value, tuple): - for i, v in enumerate(value): - result[f"{stripped_name}[{i}]"] = v - else: - result[stripped_name] = value - return result - - def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: - """Create a new Operation instance from the given attributes. - :param attrs: A mapping of attribute names to their values. - :return: A new Operation instance with the specified attributes. - :raises ValueError: If the attributes do not match the operation's expected - structure. - """ - # TODO: [lum] simplify this. We know the fields. Maybe other places too. - final_attrs: dict[str, Any] = {} - for name, value in attrs.items(): - if "{" in name and "}" in name: - base, key = name.split("{") - key = key.rstrip("}") - final_attrs.setdefault(base, {})[key] = value - elif "[" in name and "]" in name: - base, idx_str = name.split("[") - idx = int(idx_str.rstrip("]")) - final_attrs.setdefault(base, []).insert(idx, value) - else: - final_attrs[name] = value - return type(self)(**final_attrs) - - -class Resampled(Resolvable): - """A class representing a resampling operation in a NePS space. - It can either be a resolvable object or a string representing a resampling by name. - :param source: The source of the resampling, can be a resolvable object or a string. - """ - - def __init__(self, source: Resolvable | str): - """Initialize the Resampled object with a source. - :param source: The source of the resampling, which can be a resolvable object or - a string. - :raises ValueError: If the source is not a resolvable object or a string. - """ - self._source = source - - @property - def source(self) -> Resolvable | str: - """Get the source of the resampling. 
- :return: The source of the resampling, which can be a resolvable object or a - string. - """ - return self._source - - @property - def is_resampling_by_name(self) -> bool: - """Check if the resampling is by name. - :return: True if the source is a string, False otherwise. - """ - return isinstance(self._source, str) - - def get_attrs(self) -> Mapping[str, Any]: - """Get the attributes of the resampling source as a mapping. - :return: A mapping of attribute names to their values. - :raises ValueError: If the resampling is by name or the source is not resolvable. - """ - if self.is_resampling_by_name: - raise ValueError( - f"This is a resampling by name, can't get attrs from it: {self.source!r}." - ) - if not isinstance(self._source, Resolvable): - raise ValueError( - f"Source should be a resolvable object. Is: {self._source!r}." - ) - return self._source.get_attrs() - - def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: - """Create a new resolvable object from the given attributes. - :param attrs: A mapping of attribute names to their values. - :return: A new resolvable object created from the specified attributes. - :raises ValueError: If the resampling is by name or the source is not resolvable. - """ - if self.is_resampling_by_name: - raise ValueError( - "This is a resampling by name, can't create object for it:" - f" {self.source!r}." - ) - if not isinstance(self._source, Resolvable): - raise ValueError( - f"Source should be a resolvable object. Is: {self._source!r}." - ) - return self._source.from_attrs(attrs) - - -class Fidelity(Resolvable, Generic[T]): - """A class representing a fidelity in a NePS space. - It encapsulates a domain that defines the range of values for the fidelity. - :param domain: The domain of the fidelity, which can be an Integer or Float domain. - :raises ValueError: If the domain has a prior defined, as fidelity domains should not - have priors. 
- """ - - def __init__(self, domain: Integer | Float): - """Initialize the Fidelity with a domain. - :param domain: The domain of the fidelity, which can be an Integer or Float - domain. - :raises ValueError: If the domain has a prior defined, as fidelity domains should - not have priors. - """ - if domain.has_prior: - raise ValueError(f"The domain of a Fidelity can not have priors: {domain!r}.") - self._domain = domain - - @property - def min_value(self) -> int | float: - """Get the minimum value of the fidelity domain. - :return: The minimum value of the fidelity domain. - """ - return self._domain.min_value - - @property - def max_value(self) -> int | float: - """Get the maximum value of the fidelity domain. - :return: The maximum value of the fidelity domain. - """ - return self._domain.max_value - - def get_attrs(self) -> Mapping[str, Any]: - """Get the attributes of the fidelity as a mapping. - This method collects all attributes of the fidelity class and instance, - excluding private attributes and methods, and returns them as a dictionary. - :return: A mapping of attribute names to their values. - :raises ValueError: If the fidelity has no domain defined. - """ - raise ValueError("For a Fidelity object there is nothing to resolve.") - - def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: # noqa: ARG002 - """Create a new Fidelity instance from the given attributes. - :param attrs: A mapping of attribute names to their values. - :return: A new Fidelity instance with the specified attributes. - :raises ValueError: If the fidelity has no domain defined. - """ - raise ValueError("For a Fidelity object there is nothing to resolve.") - - -# ------------------------------------------------- - - class SamplingResolutionContext: """A context for resolving samplings in a NePS space. 
It manages the resolution root, domain sampler, environment values, diff --git a/neps/space/neps_spaces/optimizers/algorithms.py b/neps/space/neps_spaces/optimizers/algorithms.py index c0bef3e9e..c3a0b04a8 100644 --- a/neps/space/neps_spaces/optimizers/algorithms.py +++ b/neps/space/neps_spaces/optimizers/algorithms.py @@ -13,7 +13,6 @@ from typing import TYPE_CHECKING from neps.space.neps_spaces.neps_space import ( - Pipeline, _prepare_sampled_configs, resolve, ) @@ -30,6 +29,7 @@ import neps.state.optimizer as optimizer_state import neps.state.trial as trial_state from neps.optimizers import optimizer + from neps.space.neps_spaces.parameters import Pipeline from neps.state.trial import Trial diff --git a/neps/space/neps_spaces/optimizers/bracket_optimizer.py b/neps/space/neps_spaces/optimizers/bracket_optimizer.py index ef0f993a5..900b0247f 100644 --- a/neps/space/neps_spaces/optimizers/bracket_optimizer.py +++ b/neps/space/neps_spaces/optimizers/bracket_optimizer.py @@ -16,6 +16,7 @@ import pandas as pd import neps.optimizers.bracket_optimizer as standard_bracket_optimizer +import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.optimizers.optimizer import SampledConfig from neps.optimizers.utils.brackets import PromoteAction, SampleAction @@ -35,7 +36,7 @@ class _BracketOptimizer: """The pipeline space to optimize over.""" - space: neps_space.Pipeline + space: neps.space.neps_spaces.parameters.Pipeline """Whether or not to sample the prior first. 
@@ -198,7 +199,7 @@ def _convert_to_another_rung( def priorband( - space: neps_space.Pipeline, + space: neps.space.neps_spaces.parameters.Pipeline, *, eta: int = 3, sample_prior_first: bool | Literal["highest_fidelity"] = False, @@ -231,7 +232,7 @@ def priorband( def _bracket_optimizer( - pipeline_space: neps_space.Pipeline, + pipeline_space: neps.space.neps_spaces.parameters.Pipeline, *, bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], eta: int, diff --git a/neps/space/neps_spaces/optimizers/priorband.py b/neps/space/neps_spaces/optimizers/priorband.py index 58b179d71..aa2139f45 100644 --- a/neps/space/neps_spaces/optimizers/priorband.py +++ b/neps/space/neps_spaces/optimizers/priorband.py @@ -13,6 +13,7 @@ import numpy as np +import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.optimizers.utils import brackets from neps.space.neps_spaces import neps_space @@ -26,7 +27,7 @@ class PriorBandSampler: """Implement a sampler based on PriorBand.""" """The pipeline space to optimize over.""" - space: neps_space.Pipeline + space: neps.space.neps_spaces.parameters.Pipeline """The eta value to use for the SH bracket.""" eta: int diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py new file mode 100644 index 000000000..a5ee19424 --- /dev/null +++ b/neps/space/neps_spaces/parameters.py @@ -0,0 +1,831 @@ +"""This module defines various classes and protocols for representing and manipulating +search spaces in NePS (Neural Parameter Search). It includes definitions for domains, +pipelines, operations, and fidelity, as well as utilities for sampling and resolving +search spaces. 
+""" + +from __future__ import annotations + +import abc +import enum +import math +import random +from collections.abc import Callable, Mapping, Sequence +from typing import Any, Generic, Protocol, TypeVar, cast, runtime_checkable + +T = TypeVar("T") + + +class _Unset: + pass + + +_UNSET = _Unset() + + +@runtime_checkable +class Resolvable(Protocol): + """A protocol for objects that can be resolved into attributes.""" + + def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the resolvable object as a mapping.""" + raise NotImplementedError() + + def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: + """Create a new resolvable object from the given attributes.""" + raise NotImplementedError() + + +def resolvable_is_fully_resolved(resolvable: Resolvable) -> bool: + """Check if a resolvable object is fully resolved. + A resolvable object is considered fully resolved if all its attributes are either + not instances of Resolvable or are themselves fully resolved. + """ + attr_objects = resolvable.get_attrs().values() + return all( + not isinstance(obj, Resolvable) or resolvable_is_fully_resolved(obj) + for obj in attr_objects + ) + + +class Fidelity(Resolvable, Generic[T]): + """A class representing a fidelity in a NePS space. + It encapsulates a domain that defines the range of values for the fidelity. + :param domain: The domain of the fidelity, which can be an Integer or Float domain. + :raises ValueError: If the domain has a prior defined, as fidelity domains should not + have priors. + """ + + def __init__(self, domain: Integer | Float): + """Initialize the Fidelity with a domain. + :param domain: The domain of the fidelity, which can be an Integer or Float + domain. + :raises ValueError: If the domain has a prior defined, as fidelity domains should + not have priors. 
+ """ + if domain.has_prior: + raise ValueError(f"The domain of a Fidelity can not have priors: {domain!r}.") + self._domain = domain + + @property + def min_value(self) -> int | float: + """Get the minimum value of the fidelity domain. + :return: The minimum value of the fidelity domain. + """ + return self._domain.min_value + + @property + def max_value(self) -> int | float: + """Get the maximum value of the fidelity domain. + :return: The maximum value of the fidelity domain. + """ + return self._domain.max_value + + def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the fidelity as a mapping. + This method collects all attributes of the fidelity class and instance, + excluding private attributes and methods, and returns them as a dictionary. + :return: A mapping of attribute names to their values. + :raises ValueError: If the fidelity has no domain defined. + """ + raise ValueError("For a Fidelity object there is nothing to resolve.") + + def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: # noqa: ARG002 + """Create a new Fidelity instance from the given attributes. + :param attrs: A mapping of attribute names to their values. + :return: A new Fidelity instance with the specified attributes. + :raises ValueError: If the fidelity has no domain defined. + """ + raise ValueError("For a Fidelity object there is nothing to resolve.") + + +# ------------------------------------------------- + + +class Pipeline(Resolvable): + """A class representing a pipeline in NePS spaces. + It contains attributes that can be resolved into a configuration string, + and it can be used to sample configurations based on defined domains. + """ + + @property + def fidelity_attrs(self) -> Mapping[str, Fidelity]: + """Get the fidelity attributes of the pipeline. Fidelity attributes are special + attributes that represent the fidelity of the pipeline. + :return: A mapping of fidelity attribute names to Fidelity objects. 
+ """ + return {k: v for k, v in self.get_attrs().items() if isinstance(v, Fidelity)} + + def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the pipeline as a mapping. + This method collects all attributes of the pipeline class and instance, + excluding private attributes and methods, and returns them as a dictionary. + :return: A mapping of attribute names to their values. + """ + attrs = {} + + for attr_name, attr_value in vars(self.__class__).items(): + if attr_name.startswith("_") or callable(attr_value): + continue + attrs[attr_name] = attr_value + + for attr_name, attr_value in vars(self).items(): + if attr_name.startswith("_") or callable(attr_value): + continue + attrs[attr_name] = attr_value + + properties_to_ignore = ("fidelity_attrs",) + for property_to_ignore in properties_to_ignore: + attrs.pop(property_to_ignore, None) + + return attrs + + def from_attrs(self, attrs: Mapping[str, Any]) -> Pipeline: + """Create a new Pipeline instance from the given attributes. + :param attrs: A mapping of attribute names to their values. + :return: A new Pipeline instance with the specified attributes. + :raises ValueError: If the attributes do not match the pipeline's expected + structure. + """ + new_pipeline = Pipeline() + for name, value in attrs.items(): + setattr(new_pipeline, name, value) + return new_pipeline + + +class ConfidenceLevel(enum.Enum): + """Enum representing confidence levels for sampling.""" + + LOW = "low" + MEDIUM = "medium" + HIGH = "high" + + +class Domain(Resolvable, abc.ABC, Generic[T]): + """An abstract base class representing a domain in NePS spaces. + It defines the properties and methods that all domains must implement, + such as min and max values, sampling, and centered domains. 
+ """ + + @property + @abc.abstractmethod + def min_value(self) -> T: + """Get the minimum value of the domain.""" + raise NotImplementedError() + + @property + @abc.abstractmethod + def max_value(self) -> T: + """Get the maximum value of the domain.""" + raise NotImplementedError() + + @property + @abc.abstractmethod + def has_prior(self) -> bool: + """Check if the domain has a prior defined.""" + raise NotImplementedError() + + @property + @abc.abstractmethod + def prior(self) -> T: + """Get the prior value of the domain. + Raises ValueError if the domain has no prior defined. + """ + raise NotImplementedError() + + @property + @abc.abstractmethod + def prior_confidence(self) -> ConfidenceLevel: + """Get the confidence level of the prior. + Raises ValueError if the domain has no prior defined. + """ + raise NotImplementedError() + + @property + @abc.abstractmethod + def range_compatibility_identifier(self) -> str: + """Get a string identifier for the range compatibility of the domain. + This identifier is used to check if two domains are compatible based on their + ranges. + """ + raise NotImplementedError() + + @abc.abstractmethod + def sample(self) -> T: + """Sample a value from the domain. + Returns a value of type T that is within the domain's range. + """ + raise NotImplementedError() + + @abc.abstractmethod + def centered_around( + self, + center: T, + confidence: ConfidenceLevel, + ) -> Domain[T]: + """Create a new domain centered around a given value with a specified confidence + level. + :param center: The value around which to center the new domain. + :param confidence: The confidence level for the new domain. + :return: A new Domain instance that is centered around the specified value. + :raises ValueError: If the center value is not within the domain's range. + """ + raise NotImplementedError() + + def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the domain as a mapping. 
+ This method collects all attributes of the domain class and instance, + excluding private attributes and methods, and returns them as a dictionary. + :return: A mapping of attribute names to their values. + """ + return {k.lstrip("_"): v for k, v in vars(self).items()} + + def from_attrs(self, attrs: Mapping[str, Any]) -> Domain[T]: + """Create a new Domain instance from the given attributes. + :param attrs: A mapping of attribute names to their values. + :return: A new Domain instance with the specified attributes. + :raises ValueError: If the attributes do not match the domain's expected + structure. + """ + return type(self)(**attrs) + + +def _calculate_new_domain_bounds( + number_type: type[int] | type[float], + min_value: int | float, + max_value: int | float, + center: int | float, + confidence: ConfidenceLevel, +) -> tuple[int, int] | tuple[float, float]: + if center < min_value or center > max_value: + raise ValueError( + f"Center value {center!r} must be within domain range [{min_value!r}," + f" {max_value!r}]" + ) + + # Determine a chunk size by splitting the domain range into a fixed number of chunks. + # Then use the confidence level to decide how many chunks to include + # around the given center (on each side). + + number_of_chunks = 10.0 + chunk_size = (max_value - min_value) / number_of_chunks + + # The numbers refer to how many segments to have on each side of the center. + # TODO: [lum] we need to make sure that in the end the range does not just have the + # center, but at least a little bit more around it too. + confidence_to_number_of_chunks_on_each_side = { + ConfidenceLevel.HIGH: 1.0, + ConfidenceLevel.MEDIUM: 2.5, + ConfidenceLevel.LOW: 4.0, + } + + chunk_multiplier = confidence_to_number_of_chunks_on_each_side[confidence] + interval_radius = chunk_size * chunk_multiplier + + if number_type is int: + # In this case we need to use ceil/floor so that we end up with ints. 
+ new_min = max(min_value, math.floor(center - interval_radius)) + new_max = min(max_value, math.ceil(center + interval_radius)) + elif number_type is float: + new_min = max(min_value, center - interval_radius) + new_max = min(max_value, center + interval_radius) + else: + raise ValueError(f"Unsupported number type {number_type!r}.") + + return new_min, new_max + + +class Categorical(Domain[int], Generic[T]): + """A domain representing a categorical choice from a set of options. + It allows for sampling from a predefined set of choices and can be centered around + a specific choice with a given confidence level. + :param choices: A tuple of choices or a Domain of choices. + :param prior_index: The index of the prior choice in the choices tuple. + :param prior_confidence: The confidence level of the prior choice. + """ + + def __init__( + self, + choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T], + prior_index: int | Domain[int] | _Unset = _UNSET, + prior_confidence: ConfidenceLevel | _Unset = _UNSET, + ): + """Initialize the Categorical domain with choices and optional prior. + :param choices: A tuple of choices or a Domain of choices. + :param prior_index: The index of the prior choice in the choices tuple. + :param prior_confidence: The confidence level of the prior choice. + :raises ValueError: If the choices are empty or prior_index is out of bounds. + """ + self._choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T] + if isinstance(choices, Sequence): + self._choices = tuple(choice for choice in choices) + else: + self._choices = choices + self._prior_index = prior_index + self._prior_confidence = prior_confidence + + @property + def min_value(self) -> int: + """Get the minimum value of the categorical domain. + :return: The minimum index of the choices, which is always 0. + """ + return 0 + + @property + def max_value(self) -> int: + """Get the maximum value of the categorical domain. 
+ :return: The maximum index of the choices, which is the length of choices minus 1. + """ + return max(len(cast(tuple, self._choices)) - 1, 0) + + @property + def choices(self) -> tuple[T | Domain[T] | Resolvable, ...] | Domain[T]: + """Get the choices available in the categorical domain. + :return: A tuple of choices or a Domain of choices. + """ + return self._choices + + @property + def has_prior(self) -> bool: + """Check if the categorical domain has a prior defined. + :return: True if the prior index and confidence are set, False otherwise. + """ + return self._prior_index is not _UNSET and self._prior_confidence is not _UNSET + + @property + def prior(self) -> int: + """Get the prior index of the categorical domain. + :return: The index of the prior choice in the choices tuple. + :raises ValueError: If the domain has no prior defined. + """ + if not self.has_prior: + raise ValueError("Domain has no prior defined.") + return int(cast(int, self._prior_index)) + + @property + def prior_confidence(self) -> ConfidenceLevel: + """Get the confidence level of the prior choice. + :return: The confidence level of the prior choice. + :raises ValueError: If the domain has no prior defined. + """ + if not self.has_prior: + raise ValueError("Domain has no prior defined.") + return cast(ConfidenceLevel, self._prior_confidence) + + @property + def range_compatibility_identifier(self) -> str: + """Get a string identifier for the range compatibility of the categorical domain. + :return: A string representation of the number of choices in the domain. + """ + return f"{len(cast(tuple, self._choices))}" + + def sample(self) -> int: + """Sample a random index from the categorical choices. + :return: A randomly selected index from the choices tuple. + :raises ValueError: If the choices are empty. 
+ """ + return int(random.randint(0, len(cast(tuple[T], self._choices)) - 1)) + + def centered_around( + self, + center: int, + confidence: ConfidenceLevel, + ) -> Categorical: + """Create a new categorical domain centered around a specific choice index. + :param center: The index of the choice around which to center the new domain. + :param confidence: The confidence level for the new domain. + :return: A new Categorical instance with a range centered around the specified + choice index. + :raises ValueError: If the center index is out of bounds of the choices. + """ + new_min, new_max = cast( + tuple[int, int], + _calculate_new_domain_bounds( + number_type=int, + min_value=self.min_value, + max_value=self.max_value, + center=center, + confidence=confidence, + ), + ) + new_choices = cast(tuple, self._choices)[new_min : new_max + 1] + return Categorical( + choices=new_choices, + prior_index=new_choices.index(cast(tuple, self._choices)[center]), + prior_confidence=confidence, + ) + + +class Float(Domain[float]): + """A domain representing a continuous range of floating-point values. + It allows for sampling from a range defined by minimum and maximum values, + and can be centered around a specific value with a given confidence level. + :param min_value: The minimum value of the domain. + :param max_value: The maximum value of the domain. + :param log: Whether to sample values on a logarithmic scale. + :param prior: The prior value for the domain, if any. + :param prior_confidence: The confidence level of the prior value. + """ + + def __init__( + self, + min_value: float, + max_value: float, + log: bool = False, # noqa: FBT001, FBT002 + prior: float | _Unset = _UNSET, + prior_confidence: ConfidenceLevel | _Unset = _UNSET, + ): + """Initialize the Float domain with min and max values, and optional prior. + :param min_value: The minimum value of the domain. + :param max_value: The maximum value of the domain. 
+ :param log: Whether to sample values on a logarithmic scale. + :param prior: The prior value for the domain, if any. + :param prior_confidence: The confidence level of the prior value. + :raises ValueError: If min_value is greater than max_value. + """ + self._min_value = min_value + self._max_value = max_value + self._log = log + self._prior = prior + self._prior_confidence = prior_confidence + + @property + def min_value(self) -> float: + """Get the minimum value of the floating-point domain. + :return: The minimum value of the domain. + :raises ValueError: If min_value is greater than max_value. + """ + return self._min_value + + @property + def max_value(self) -> float: + """Get the maximum value of the floating-point domain. + :return: The maximum value of the domain. + :raises ValueError: If min_value is greater than max_value. + """ + return self._max_value + + @property + def has_prior(self) -> bool: + """Check if the floating-point domain has a prior defined. + :return: True if the prior and prior confidence are set, False otherwise. + """ + return self._prior is not _UNSET and self._prior_confidence is not _UNSET + + @property + def prior(self) -> float: + """Get the prior value of the floating-point domain. + :return: The prior value of the domain. + :raises ValueError: If the domain has no prior defined. + """ + if not self.has_prior: + raise ValueError("Domain has no prior defined.") + return float(cast(float, self._prior)) + + @property + def prior_confidence(self) -> ConfidenceLevel: + """Get the confidence level of the prior value. + :return: The confidence level of the prior value. + :raises ValueError: If the domain has no prior defined. + """ + if not self.has_prior: + raise ValueError("Domain has no prior defined.") + return cast(ConfidenceLevel, self._prior_confidence) + + @property + def range_compatibility_identifier(self) -> str: + """Get a string identifier for the range compatibility of the floating-point + domain. 
+ :return: A string representation of the minimum and maximum values, and whether + the domain is logarithmic. + """ + return f"{self._min_value}_{self._max_value}_{self._log}" + + def sample(self) -> float: + """Sample a random floating-point value from the domain. + :return: A randomly selected floating-point value within the domain's range. + :raises ValueError: If min_value is greater than max_value. + """ + if self._log: + log_min = math.log(self._min_value) + log_max = math.log(self._max_value) + return float(math.exp(random.uniform(log_min, log_max))) + return float(random.uniform(self._min_value, self._max_value)) + + def centered_around( + self, + center: float, + confidence: ConfidenceLevel, + ) -> Float: + """Create a new floating-point domain centered around a specific value. + :param center: The value around which to center the new domain. + :param confidence: The confidence level for the new domain. + :return: A new Float instance that is centered around the specified value. + :raises ValueError: If the center value is not within the domain's range. + """ + new_min, new_max = _calculate_new_domain_bounds( + number_type=float, + min_value=self.min_value, + max_value=self.max_value, + center=center, + confidence=confidence, + ) + return Float( + min_value=new_min, + max_value=new_max, + log=self._log, + prior=center, + prior_confidence=confidence, + ) + + +class Integer(Domain[int]): + """A domain representing a range of integer values. + It allows for sampling from a range defined by minimum and maximum values, + and can be centered around a specific value with a given confidence level. + :param min_value: The minimum value of the domain. + :param max_value: The maximum value of the domain. + :param log: Whether to sample values on a logarithmic scale. + :param prior: The prior value for the domain, if any. + :param prior_confidence: The confidence level of the prior value. 
+ """ + + def __init__( + self, + min_value: int, + max_value: int, + log: bool = False, # noqa: FBT001, FBT002 + prior: int | _Unset = _UNSET, + prior_confidence: ConfidenceLevel | _Unset = _UNSET, + ): + """Initialize the Integer domain with min and max values, and optional prior. + :param min_value: The minimum value of the domain. + :param max_value: The maximum value of the domain. + :param log: Whether to sample values on a logarithmic scale. + :param prior: The prior value for the domain, if any. + :param prior_confidence: The confidence level of the prior value. + :raises ValueError: If min_value is greater than max_value. + """ + self._min_value = min_value + self._max_value = max_value + self._log = log + self._prior = prior + self._prior_confidence = prior_confidence + + @property + def min_value(self) -> int: + """Get the minimum value of the integer domain. + :return: The minimum value of the domain. + :raises ValueError: If min_value is greater than max_value. + """ + return self._min_value + + @property + def max_value(self) -> int: + """Get the maximum value of the integer domain. + :return: The maximum value of the domain. + :raises ValueError: If min_value is greater than max_value. + """ + return self._max_value + + @property + def has_prior(self) -> bool: + """Check if the integer domain has a prior defined. + :return: True if the prior and prior confidence are set, False otherwise. + """ + return self._prior is not _UNSET and self._prior_confidence is not _UNSET + + @property + def prior(self) -> int: + """Get the prior value of the integer domain. + :return: The prior value of the domain. + :raises ValueError: If the domain has no prior defined. + """ + if not self.has_prior: + raise ValueError("Domain has no prior defined.") + return int(cast(int, self._prior)) + + @property + def prior_confidence(self) -> ConfidenceLevel: + """Get the confidence level of the prior value. + :return: The confidence level of the prior value. 
+ :raises ValueError: If the domain has no prior defined. + """ + if not self.has_prior: + raise ValueError("Domain has no prior defined.") + return cast(ConfidenceLevel, self._prior_confidence) + + @property + def range_compatibility_identifier(self) -> str: + """Get a string identifier for the range compatibility of the integer domain. + :return: A string representation of the minimum and maximum values, and whether + the domain is logarithmic. + """ + return f"{self._min_value}_{self._max_value}_{self._log}" + + def sample(self) -> int: + """Sample a random integer value from the domain. + :return: A randomly selected integer value within the domain's range. + :raises NotImplementedError: If the domain is set to sample on a logarithmic + scale, as this is not implemented yet. + """ + if self._log: + raise NotImplementedError("TODO.") + return int(random.randint(self._min_value, self._max_value)) + + def centered_around( + self, + center: int, + confidence: ConfidenceLevel, + ) -> Integer: + """Create a new integer domain centered around a specific value. + :param center: The value around which to center the new domain. + :param confidence: The confidence level for the new domain. + :return: A new Integer instance that is centered around the specified value. + :raises ValueError: If the center value is not within the domain's range. + """ + new_min, new_max = cast( + tuple[int, int], + _calculate_new_domain_bounds( + number_type=int, + min_value=self.min_value, + max_value=self.max_value, + center=center, + confidence=confidence, + ), + ) + return Integer( + min_value=new_min, + max_value=new_max, + log=self._log, + prior=center, + prior_confidence=confidence, + ) + + +class Operation(Resolvable): + """A class representing an operation in a NePS space. + It encapsulates an operator (a callable or a string), arguments, and keyword + arguments. + The operator can be a function or a string representing a function name. 
class Operation(Resolvable):
    """An operation node in a NePS space.

    Wraps an operator (a callable, or a string naming one) together with
    positional and keyword arguments. Either argument collection may itself
    be a :class:`Resolvable` that is resolved to concrete values later.

    :param operator: The operator, a callable or a string naming one.
    :param args: Positional arguments for the operator, or a Resolvable.
    :param kwargs: Keyword arguments for the operator, or a Resolvable.
    """

    def __init__(
        self,
        operator: Callable | str,
        args: Sequence[Any] | Resolvable | None = None,
        kwargs: Mapping[str, Any] | Resolvable | None = None,
    ):
        """Initialize the Operation.

        :param operator: The operator, a callable or a string naming one.
        :param args: Positional arguments for the operator, or a Resolvable.
        :param kwargs: Keyword arguments for the operator, or a Resolvable.
        """
        self._operator = operator

        # Normalize `args` to a tuple unless it is a deferred Resolvable.
        self._args: tuple[Any, ...] | Resolvable
        if isinstance(args, Resolvable):
            self._args = args
        else:
            self._args = tuple(args) if args else ()

        # Normalize `kwargs` to a mapping unless it is a deferred Resolvable.
        self._kwargs: Mapping[str, Any] | Resolvable
        if isinstance(kwargs, Resolvable):
            self._kwargs = kwargs
        else:
            self._kwargs = kwargs if kwargs else {}

    @property
    def operator(self) -> Callable | str:
        """The operator of the operation: a callable or a string naming one."""
        return self._operator

    @property
    def args(self) -> tuple[Any, ...]:
        """The positional arguments for the operator.

        NOTE(review): may still hold an unresolved Resolvable; the cast only
        asserts the post-resolution type.
        """
        return cast(tuple[Any, ...], self._args)

    @property
    def kwargs(self) -> Mapping[str, Any]:
        """The keyword arguments for the operator.

        NOTE(review): may still hold an unresolved Resolvable; the cast only
        asserts the post-resolution type.
        """
        return cast(Mapping[str, Any], self._kwargs)

    def get_attrs(self) -> Mapping[str, Any]:
        """Flatten the operation's fields into a single mapping.

        Tuple entries are encoded as ``name[i]`` and mapping entries as
        ``name{key}``; everything else is stored under its plain field name
        (leading underscores stripped).

        :return: A mapping of attribute names to their values.
        """
        # TODO: [lum] simplify this. We know the fields. Maybe other places too.
        result: dict[str, Any] = {}
        for name, value in vars(self).items():
            stripped_name = name.lstrip("_")
            if isinstance(value, dict):
                for k, v in value.items():
                    # Multiple {{}} needed to escape surrounding '{' and '}'.
                    result[f"{stripped_name}{{{k}}}"] = v
            elif isinstance(value, tuple):
                for i, v in enumerate(value):
                    result[f"{stripped_name}[{i}]"] = v
            else:
                result[stripped_name] = value
        return result

    def from_attrs(self, attrs: Mapping[str, Any]) -> Operation:
        """Create a new Operation from a mapping produced by `get_attrs`.

        :param attrs: A mapping of attribute names to their values, using the
            ``name[i]`` / ``name{key}`` encoding of `get_attrs`.
        :return: A new Operation instance with the specified attributes.
        """
        # TODO: [lum] simplify this. We know the fields. Maybe other places too.
        final_attrs: dict[str, Any] = {}
        # Indexed (`name[i]`) entries are gathered per base name and ordered
        # explicitly afterwards, so the result is correct regardless of the
        # mapping's iteration order. The previous `list.insert(idx, ...)`
        # approach silently scrambled elements when indices arrived out of
        # ascending order (e.g. 2, 1, 0 produced [v0, v2, v1]).
        indexed: dict[str, dict[int, Any]] = {}
        for name, value in attrs.items():
            if "{" in name and "}" in name:
                base, _, key = name.partition("{")
                final_attrs.setdefault(base, {})[key.rstrip("}")] = value
            elif "[" in name and "]" in name:
                base, _, idx_str = name.partition("[")
                indexed.setdefault(base, {})[int(idx_str.rstrip("]"))] = value
            else:
                final_attrs[name] = value
        for base, parts in indexed.items():
            final_attrs[base] = tuple(parts[i] for i in sorted(parts))
        return type(self)(**final_attrs)
class Resampled(Resolvable):
    """Marks a resampling step in a NePS space.

    The source is either a resolvable object to resample from, or a string
    that refers to a resampling target by name.

    :param source: The source of the resampling, a resolvable object or a
        string.
    """

    def __init__(self, source: Resolvable | str):
        """Initialize the Resampled object with a source.

        :param source: The resampling source: a resolvable object or a name.
        """
        self._source = source

    @property
    def source(self) -> Resolvable | str:
        """The resampling source, a resolvable object or a string name."""
        return self._source

    @property
    def is_resampling_by_name(self) -> bool:
        """Whether this resampling refers to its source by name (a string)."""
        return isinstance(self._source, str)

    def _resolvable_source(self, by_name_error: str) -> Resolvable:
        # Shared validation for get_attrs/from_attrs: reject by-name
        # resamplings and non-resolvable sources before delegating.
        if self.is_resampling_by_name:
            raise ValueError(by_name_error)
        if not isinstance(self._source, Resolvable):
            raise ValueError(
                f"Source should be a resolvable object. Is: {self._source!r}."
            )
        return self._source

    def get_attrs(self) -> Mapping[str, Any]:
        """Get the attributes of the resampling source as a mapping.

        :return: A mapping of attribute names to their values.
        :raises ValueError: If the resampling is by name or the source is not
            resolvable.
        """
        source = self._resolvable_source(
            f"This is a resampling by name, can't get attrs from it: {self.source!r}."
        )
        return source.get_attrs()

    def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable:
        """Create a new resolvable object from the given attributes.

        :param attrs: A mapping of attribute names to their values.
        :return: A new resolvable object created from the attributes.
        :raises ValueError: If the resampling is by name or the source is not
            resolvable.
        """
        source = self._resolvable_source(
            "This is a resampling by name, can't create object for it:"
            f" {self.source!r}."
        )
        return source.from_attrs(attrs)
@runtime_checkable
class DomainSampler(Protocol):
    """Structural interface for samplers that draw values from a domain.

    Any callable object accepting the keyword-only arguments below satisfies
    this protocol; ``@runtime_checkable`` additionally allows ``isinstance``
    checks against it.
    """

    def __call__(
        self,
        *,
        domain_obj: Domain[T],
        current_path: str,
    ) -> T:
        """Sample one value from ``domain_obj``.

        :param domain_obj: The domain object to sample from.
        :param current_path: The current path in the resolution context.
        :return: A sampled value of type T from the domain.
        :raises NotImplementedError: Always, on the protocol itself —
            implementers must override this method.
        """
        raise NotImplementedError
diff --git a/neps/space/parsing.py b/neps/space/parsing.py index fad079c92..d7d6d5fcc 100644 --- a/neps/space/parsing.py +++ b/neps/space/parsing.py @@ -9,7 +9,7 @@ from collections.abc import Mapping, Sequence from typing import TYPE_CHECKING, Any, TypeAlias -from neps.space.neps_spaces.neps_space import Pipeline +from neps.space.neps_spaces.parameters import Pipeline from neps.space.parameters import Categorical, Constant, Float, Integer, Parameter from neps.space.search_space import SearchSpace @@ -56,7 +56,9 @@ def scientific_parse(value: str | int | float) -> str | int | float: ) -def as_parameter(details: SerializedParameter) -> Parameter | Constant: # noqa: C901, PLR0911, PLR0912 +def as_parameter( # noqa: C901, PLR0911, PLR0912 + details: SerializedParameter, +) -> Parameter | Constant: """Deduces the parameter type from details. Args: @@ -172,7 +174,7 @@ def as_parameter(details: SerializedParameter) -> Parameter | Constant: # noqa: return Float(_x, _y, **rest) # type: ignore case _: raise ValueError( - f"Expected both 'int' or 'float' for bounds but" + "Expected both 'int' or 'float' for bounds but" f" got {type(_x)=} and {type(_y)=}." 
) case _: diff --git a/tests/test_neps_space/test_domain__centering.py b/tests/test_neps_space/test_domain__centering.py index 37a236c4e..305487dda 100644 --- a/tests/test_neps_space/test_domain__centering.py +++ b/tests/test_neps_space/test_domain__centering.py @@ -2,15 +2,15 @@ import pytest -from neps.space.neps_spaces import neps_space +import neps.space.neps_spaces.parameters @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max"), [ - (neps_space.ConfidenceLevel.LOW, (50, 10, 90)), - (neps_space.ConfidenceLevel.MEDIUM, (50, 25, 75)), - (neps_space.ConfidenceLevel.HIGH, (50, 40, 60)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, (50, 10, 90)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (50, 25, 75)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (50, 40, 60)), ], ) def test_centering_integer( @@ -23,11 +23,11 @@ def test_centering_integer( int_prior = 50 - int1 = neps_space.Integer( + int1 = neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=100, ) - int2 = neps_space.Integer( + int2 = neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=100, prior=int_prior, @@ -59,9 +59,12 @@ def test_centering_integer( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max"), [ - (neps_space.ConfidenceLevel.LOW, (50.0, 10.399999999999999, 89.6)), - (neps_space.ConfidenceLevel.MEDIUM, (50.0, 25.25, 74.75)), - (neps_space.ConfidenceLevel.HIGH, (50.0, 40.1, 59.9)), + ( + neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, + (50.0, 10.399999999999999, 89.6), + ), + (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (50.0, 25.25, 74.75)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (50.0, 40.1, 59.9)), ], ) def test_centering_float( @@ -74,11 +77,11 @@ def test_centering_float( float_prior = 50.0 - float1 = neps_space.Float( + float1 = neps.space.neps_spaces.parameters.Float( min_value=1.0, max_value=100.0, ) - float2 = neps_space.Float( + float2 = 
neps.space.neps_spaces.parameters.Float( min_value=1.0, max_value=100.0, prior=float_prior, @@ -110,9 +113,9 @@ def test_centering_float( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max_value"), [ - (neps_space.ConfidenceLevel.LOW, (40, 0, 80, 50)), - (neps_space.ConfidenceLevel.MEDIUM, (25, 0, 50, 50)), - (neps_space.ConfidenceLevel.HIGH, (10, 0, 20, 50)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, (40, 0, 80, 50)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (25, 0, 50, 50)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (10, 0, 20, 50)), ], ) def test_centering_categorical( @@ -125,10 +128,10 @@ def test_centering_categorical( categorical_prior_index_original = 49 - categorical1 = neps_space.Categorical( + categorical1 = neps.space.neps_spaces.parameters.Categorical( choices=tuple(range(1, 101)), ) - categorical2 = neps_space.Categorical( + categorical2 = neps.space.neps_spaces.parameters.Categorical( choices=tuple(range(1, 101)), prior_index=categorical_prior_index_original, prior_confidence=confidence_level, @@ -167,22 +170,22 @@ def test_centering_categorical( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max"), [ - (neps_space.ConfidenceLevel.LOW, (10, 5, 13)), - (neps_space.ConfidenceLevel.MEDIUM, (10, 7, 13)), - (neps_space.ConfidenceLevel.HIGH, (10, 8, 12)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, (10, 5, 13)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (10, 7, 13)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (10, 8, 12)), ], ) def test_centering_stranger_ranges_integer( confidence_level, expected_prior_min_max, ): - int1 = neps_space.Integer( + int1 = neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=13, ) int1_centered = int1.centered_around(10, confidence_level) - int2 = neps_space.Integer( + int2 = neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=13, prior=10, @@ -208,22 +211,25 
@@ def test_centering_stranger_ranges_integer( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max"), [ - (neps_space.ConfidenceLevel.LOW, (0.5, 0.09999999999999998, 0.9)), - (neps_space.ConfidenceLevel.MEDIUM, (0.5, 0.25, 0.75)), - (neps_space.ConfidenceLevel.HIGH, (0.5, 0.4, 0.6)), + ( + neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, + (0.5, 0.09999999999999998, 0.9), + ), + (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (0.5, 0.25, 0.75)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (0.5, 0.4, 0.6)), ], ) def test_centering_stranger_ranges_float( confidence_level, expected_prior_min_max, ): - float1 = neps_space.Float( + float1 = neps.space.neps_spaces.parameters.Float( min_value=0.0, max_value=1.0, ) float1_centered = float1.centered_around(0.5, confidence_level) - float2 = neps_space.Float( + float2 = neps.space.neps_spaces.parameters.Float( min_value=0.0, max_value=1.0, prior=0.5, @@ -249,21 +255,21 @@ def test_centering_stranger_ranges_float( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max_value"), [ - (neps_space.ConfidenceLevel.LOW, (2, 0, 5, 2)), - (neps_space.ConfidenceLevel.MEDIUM, (2, 0, 4, 2)), - (neps_space.ConfidenceLevel.HIGH, (1, 0, 2, 2)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, (2, 0, 5, 2)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (2, 0, 4, 2)), + (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (1, 0, 2, 2)), ], ) def test_centering_stranger_ranges_categorical( confidence_level, expected_prior_min_max_value, ): - categorical1 = neps_space.Categorical( + categorical1 = neps.space.neps_spaces.parameters.Categorical( choices=tuple(range(7)), ) categorical1_centered = categorical1.centered_around(2, confidence_level) - categorical2 = neps_space.Categorical( + categorical2 = neps.space.neps_spaces.parameters.Categorical( choices=tuple(range(7)), prior_index=2, prior_confidence=confidence_level, diff --git 
a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index af154c78a..f609c1a18 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -6,6 +6,7 @@ import neps import neps.space.neps_spaces.optimizers.algorithms +import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space @@ -28,117 +29,117 @@ def hyperparameter_pipeline_to_optimize( return objective_to_minimize -class DemoHyperparameterSpace(neps_space.Pipeline): - float1 = neps_space.Float( +class DemoHyperparameterSpace(neps.space.neps_spaces.parameters.Pipeline): + float1 = neps.space.neps_spaces.parameters.Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - float2 = neps_space.Float( + float2 = neps.space.neps_spaces.parameters.Float( min_value=-10, max_value=10, prior=0.1, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - categorical = neps_space.Categorical( + categorical = neps.space.neps_spaces.parameters.Categorical( choices=(0, 1), prior_index=0, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - integer1 = neps_space.Integer( + integer1 = neps.space.neps_spaces.parameters.Integer( min_value=0, max_value=1, prior=0, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - integer2 = neps_space.Integer( + integer2 = neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=1000, prior=10, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) -class DemoHyperparameterWithFidelitySpace(neps_space.Pipeline): - float1 = 
neps_space.Float( +class DemoHyperparameterWithFidelitySpace(neps.space.neps_spaces.parameters.Pipeline): + float1 = neps.space.neps_spaces.parameters.Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - float2 = neps_space.Float( + float2 = neps.space.neps_spaces.parameters.Float( min_value=-10, max_value=10, prior=0.1, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - categorical = neps_space.Categorical( + categorical = neps.space.neps_spaces.parameters.Categorical( choices=(0, 1), prior_index=0, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - integer1 = neps_space.Integer( + integer1 = neps.space.neps_spaces.parameters.Integer( min_value=0, max_value=1, prior=0, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - integer2 = neps_space.Fidelity( - neps_space.Integer( + integer2 = neps.space.neps_spaces.parameters.Fidelity( + neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=1000, ), ) -class DemoHyperparameterComplexSpace(neps_space.Pipeline): - _small_float = neps_space.Float( +class DemoHyperparameterComplexSpace(neps.space.neps_spaces.parameters.Pipeline): + _small_float = neps.space.neps_spaces.parameters.Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - _big_float = neps_space.Float( + _big_float = neps.space.neps_spaces.parameters.Float( min_value=10, max_value=100, prior=20, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - float1 = neps_space.Categorical( 
+ float1 = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Resampled(_small_float), - neps_space.Resampled(_big_float), + neps.space.neps_spaces.parameters.Resampled(_small_float), + neps.space.neps_spaces.parameters.Resampled(_big_float), ), prior_index=0, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - float2 = neps_space.Categorical( + float2 = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Resampled(_small_float), - neps_space.Resampled(_big_float), + neps.space.neps_spaces.parameters.Resampled(_small_float), + neps.space.neps_spaces.parameters.Resampled(_big_float), float1, ), prior_index=0, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - categorical = neps_space.Categorical( + categorical = neps.space.neps_spaces.parameters.Categorical( choices=(0, 1), prior_index=0, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - integer1 = neps_space.Integer( + integer1 = neps.space.neps_spaces.parameters.Integer( min_value=0, max_value=1, prior=0, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - integer2 = neps_space.Integer( + integer2 = neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=1000, prior=10, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) @@ -274,18 +275,18 @@ def operation_pipeline_to_optimize(model: Model, some_hp: str): return objective_to_minimize -class DemoOperationSpace(neps_space.Pipeline): +class DemoOperationSpace(neps.space.neps_spaces.parameters.Pipeline): """A demonstration of how to use operations in a search space. 
This space defines a model that can be optimized using different inner functions and a factor. The model can be used to evaluate a set of values and return an objective to minimize. """ # The way to sample `factor` values - _factor = neps_space.Float( + _factor = neps.space.neps_spaces.parameters.Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) # Sum @@ -293,31 +294,31 @@ class DemoOperationSpace(neps_space.Pipeline): # `Sum()` # Could have also been defined using the python `sum` function as # `_sum = space.Operation(operator=lambda: sum)` - _sum = neps_space.Operation(operator=Sum) + _sum = neps.space.neps_spaces.parameters.Operation(operator=Sum) # MultipliedSum # Will be equivalent to something like # `MultipliedSum(factor=0.2)` - _multiplied_sum = neps_space.Operation( + _multiplied_sum = neps.space.neps_spaces.parameters.Operation( operator=MultipliedSum, - kwargs={"factor": neps_space.Resampled(_factor)}, + kwargs={"factor": neps.space.neps_spaces.parameters.Resampled(_factor)}, ) # Model # Will be equivalent to something like one of # `Model(Sum(), factor=0.1)` # `Model(MultipliedSum(factor=0.2), factor=0.1)` - _inner_function = neps_space.Categorical( + _inner_function = neps.space.neps_spaces.parameters.Categorical( choices=(_sum, _multiplied_sum), ) - model = neps_space.Operation( + model = neps.space.neps_spaces.parameters.Operation( operator=Model, args=(_inner_function,), - kwargs={"factor": neps_space.Resampled(_factor)}, + kwargs={"factor": neps.space.neps_spaces.parameters.Resampled(_factor)}, ) # An additional hyperparameter - some_hp = neps_space.Categorical( + some_hp = neps.space.neps_spaces.parameters.Categorical( choices=("hp1", "hp2"), ) diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index 686f4da9a..74a98e8e4 
100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -9,6 +9,7 @@ import neps.optimizers.algorithms as old_algorithms import neps.space.neps_spaces.optimizers.algorithms import neps.space.neps_spaces.optimizers.bracket_optimizer as new_bracket_optimizer +import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space _COSTS = {} @@ -57,28 +58,28 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): } -class DemoHyperparameterWithFidelitySpace(neps_space.Pipeline): - float1 = neps_space.Float( +class DemoHyperparameterWithFidelitySpace(neps.space.neps_spaces.parameters.Pipeline): + float1 = neps.space.neps_spaces.parameters.Float( min_value=1, max_value=1000, log=False, prior=600, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - float2 = neps_space.Float( + float2 = neps.space.neps_spaces.parameters.Float( min_value=-100, max_value=100, prior=0, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - integer1 = neps_space.Integer( + integer1 = neps.space.neps_spaces.parameters.Integer( min_value=0, max_value=500, prior=35, - prior_confidence=neps_space.ConfidenceLevel.LOW, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, ) - fidelity = neps_space.Fidelity( - domain=neps_space.Integer( + fidelity = neps.space.neps_spaces.parameters.Fidelity( + domain=neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=100, ), diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index ec462a9e6..1f195c79f 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py 
@@ -9,6 +9,7 @@ import neps.optimizers.algorithms as old_algorithms import neps.space.neps_spaces.optimizers.algorithms import neps.space.neps_spaces.optimizers.bracket_optimizer as new_bracket_optimizer +import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space @@ -44,28 +45,28 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): } -class DemoHyperparameterWithFidelitySpace(neps_space.Pipeline): - float1 = neps_space.Float( +class DemoHyperparameterWithFidelitySpace(neps.space.neps_spaces.parameters.Pipeline): + float1 = neps.space.neps_spaces.parameters.Float( min_value=1, max_value=1000, log=False, prior=600, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - float2 = neps_space.Float( + float2 = neps.space.neps_spaces.parameters.Float( min_value=-100, max_value=100, prior=0, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - integer1 = neps_space.Integer( + integer1 = neps.space.neps_spaces.parameters.Integer( min_value=0, max_value=500, prior=35, - prior_confidence=neps_space.ConfidenceLevel.LOW, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, ) - fidelity = neps_space.Fidelity( - domain=neps_space.Integer( + fidelity = neps.space.neps_spaces.parameters.Fidelity( + domain=neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=100, ), diff --git a/tests/test_neps_space/test_search_space__fidelity.py b/tests/test_neps_space/test_search_space__fidelity.py index 5eaa62ffb..6cee0e657 100644 --- a/tests/test_neps_space/test_search_space__fidelity.py +++ b/tests/test_neps_space/test_search_space__fidelity.py @@ -4,20 +4,21 @@ import pytest +import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.space.neps_spaces import neps_space -class DemoHyperparametersWithFidelitySpace(neps_space.Pipeline): 
+class DemoHyperparametersWithFidelitySpace(neps.space.neps_spaces.parameters.Pipeline): constant1: int = 42 - float1: float = neps_space.Float( + float1: float = neps.space.neps_spaces.parameters.Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ) - fidelity_integer1: int = neps_space.Fidelity( - domain=neps_space.Integer( + fidelity_integer1: int = neps.space.neps_spaces.parameters.Fidelity( + domain=neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=1000, ), @@ -30,12 +31,12 @@ def test_fidelity_creation_raises_when_domain_has_prior(): ValueError, match=re.escape("The domain of a Fidelity can not have priors: "), ): - neps_space.Fidelity( - domain=neps_space.Integer( + neps.space.neps_spaces.parameters.Fidelity( + domain=neps.space.neps_spaces.parameters.Integer( min_value=1, max_value=1000, prior=10, - prior_confidence=neps_space.ConfidenceLevel.MEDIUM, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, ), ) diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index 15a979f4a..9825e1cb6 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -2,139 +2,160 @@ import pytest +import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.space.neps_spaces import config_string, neps_space -class GrammarLike(neps_space.Pipeline): - _id = neps_space.Operation(operator="Identity") - _three = neps_space.Operation(operator="Conv2D-3") - _one = neps_space.Operation(operator="Conv2D-1") - _reluconvbn = neps_space.Operation(operator="ReLUConvBN") +class GrammarLike(neps.space.neps_spaces.parameters.Pipeline): + _id = neps.space.neps_spaces.parameters.Operation(operator="Identity") + _three = 
neps.space.neps_spaces.parameters.Operation(operator="Conv2D-3") + _one = neps.space.neps_spaces.parameters.Operation(operator="Conv2D-1") + _reluconvbn = neps.space.neps_spaces.parameters.Operation(operator="ReLUConvBN") - _O = neps_space.Categorical(choices=(_three, _one, _id)) + _O = neps.space.neps_spaces.parameters.Categorical(choices=(_three, _one, _id)) - _C0 = neps_space.Operation( + _C0 = neps.space.neps_spaces.parameters.Operation( operator="Sequential", - args=(neps_space.Resampled(_O),), + args=(neps.space.neps_spaces.parameters.Resampled(_O),), ) - _C1 = neps_space.Operation( + _C1 = neps.space.neps_spaces.parameters.Operation( operator="Sequential", - args=(neps_space.Resampled(_O), neps_space.Resampled("S"), _reluconvbn), + args=( + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled("S"), + _reluconvbn, + ), ) - _C2 = neps_space.Operation( + _C2 = neps.space.neps_spaces.parameters.Operation( operator="Sequential", - args=(neps_space.Resampled(_O), neps_space.Resampled("S")), + args=( + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled("S"), + ), ) - _C3 = neps_space.Operation( + _C3 = neps.space.neps_spaces.parameters.Operation( operator="Sequential", - args=(neps_space.Resampled("S"),), + args=(neps.space.neps_spaces.parameters.Resampled("S"),), ) - _C = neps_space.Categorical( + _C = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Resampled(_C0), - neps_space.Resampled(_C1), - neps_space.Resampled(_C2), - neps_space.Resampled(_C3), + neps.space.neps_spaces.parameters.Resampled(_C0), + neps.space.neps_spaces.parameters.Resampled(_C1), + neps.space.neps_spaces.parameters.Resampled(_C2), + neps.space.neps_spaces.parameters.Resampled(_C3), ), ) - _S0 = neps_space.Operation( + _S0 = neps.space.neps_spaces.parameters.Operation( operator="Sequential", - args=(neps_space.Resampled(_C),), + args=(neps.space.neps_spaces.parameters.Resampled(_C),), ) 
- _S1 = neps_space.Operation( + _S1 = neps.space.neps_spaces.parameters.Operation( operator="Sequential", args=(_reluconvbn,), ) - _S2 = neps_space.Operation( + _S2 = neps.space.neps_spaces.parameters.Operation( operator="Sequential", - args=(neps_space.Resampled("S"),), + args=(neps.space.neps_spaces.parameters.Resampled("S"),), ) - _S3 = neps_space.Operation( + _S3 = neps.space.neps_spaces.parameters.Operation( operator="Sequential", - args=(neps_space.Resampled("S"), neps_space.Resampled(_C)), + args=( + neps.space.neps_spaces.parameters.Resampled("S"), + neps.space.neps_spaces.parameters.Resampled(_C), + ), ) - _S4 = neps_space.Operation( + _S4 = neps.space.neps_spaces.parameters.Operation( operator="Sequential", args=( - neps_space.Resampled(_O), - neps_space.Resampled(_O), - neps_space.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), ), ) - _S5 = neps_space.Operation( + _S5 = neps.space.neps_spaces.parameters.Operation( operator="Sequential", args=( - neps_space.Resampled("S"), - neps_space.Resampled("S"), - neps_space.Resampled(_O), - neps_space.Resampled(_O), - neps_space.Resampled(_O), - neps_space.Resampled(_O), - neps_space.Resampled(_O), - neps_space.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled("S"), + neps.space.neps_spaces.parameters.Resampled("S"), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), ), ) - S = neps_space.Categorical( + S = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Resampled(_S0), - neps_space.Resampled(_S1), - neps_space.Resampled(_S2), - neps_space.Resampled(_S3), - neps_space.Resampled(_S4), - neps_space.Resampled(_S5), + 
neps.space.neps_spaces.parameters.Resampled(_S0), + neps.space.neps_spaces.parameters.Resampled(_S1), + neps.space.neps_spaces.parameters.Resampled(_S2), + neps.space.neps_spaces.parameters.Resampled(_S3), + neps.space.neps_spaces.parameters.Resampled(_S4), + neps.space.neps_spaces.parameters.Resampled(_S5), ), ) -class GrammarLikeAlt(neps_space.Pipeline): - _id = neps_space.Operation(operator="Identity") - _three = neps_space.Operation(operator="Conv2D-3") - _one = neps_space.Operation(operator="Conv2D-1") - _reluconvbn = neps_space.Operation(operator="ReLUConvBN") +class GrammarLikeAlt(neps.space.neps_spaces.parameters.Pipeline): + _id = neps.space.neps_spaces.parameters.Operation(operator="Identity") + _three = neps.space.neps_spaces.parameters.Operation(operator="Conv2D-3") + _one = neps.space.neps_spaces.parameters.Operation(operator="Conv2D-1") + _reluconvbn = neps.space.neps_spaces.parameters.Operation(operator="ReLUConvBN") - _O = neps_space.Categorical(choices=(_three, _one, _id)) + _O = neps.space.neps_spaces.parameters.Categorical(choices=(_three, _one, _id)) - _C_ARGS = neps_space.Categorical( + _C_ARGS = neps.space.neps_spaces.parameters.Categorical( choices=( - (neps_space.Resampled(_O),), - (neps_space.Resampled(_O), neps_space.Resampled("S"), _reluconvbn), - (neps_space.Resampled(_O), neps_space.Resampled("S")), - (neps_space.Resampled("S"),), + (neps.space.neps_spaces.parameters.Resampled(_O),), + ( + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled("S"), + _reluconvbn, + ), + ( + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled("S"), + ), + (neps.space.neps_spaces.parameters.Resampled("S"),), ), ) - _C = neps_space.Operation( + _C = neps.space.neps_spaces.parameters.Operation( operator="Sequential", - args=neps_space.Resampled(_C_ARGS), + args=neps.space.neps_spaces.parameters.Resampled(_C_ARGS), ) - _S_ARGS = neps_space.Categorical( + _S_ARGS = 
neps.space.neps_spaces.parameters.Categorical( choices=( - (neps_space.Resampled(_C),), + (neps.space.neps_spaces.parameters.Resampled(_C),), (_reluconvbn,), - (neps_space.Resampled("S"),), - (neps_space.Resampled("S"), neps_space.Resampled(_C)), + (neps.space.neps_spaces.parameters.Resampled("S"),), + ( + neps.space.neps_spaces.parameters.Resampled("S"), + neps.space.neps_spaces.parameters.Resampled(_C), + ), ( - neps_space.Resampled(_O), - neps_space.Resampled(_O), - neps_space.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), ), ( - neps_space.Resampled("S"), - neps_space.Resampled("S"), - neps_space.Resampled(_O), - neps_space.Resampled(_O), - neps_space.Resampled(_O), - neps_space.Resampled(_O), - neps_space.Resampled(_O), - neps_space.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled("S"), + neps.space.neps_spaces.parameters.Resampled("S"), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), + neps.space.neps_spaces.parameters.Resampled(_O), ), ), ) - S = neps_space.Operation( + S = neps.space.neps_spaces.parameters.Operation( operator="Sequential", - args=neps_space.Resampled(_S_ARGS), + args=neps.space.neps_spaces.parameters.Resampled(_S_ARGS), ) diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index 98bc58c7c..a59f9e3b4 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -2,11 +2,12 @@ import pytest +import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.space.neps_spaces import config_string, neps_space -class 
HNASLikePipeline(neps_space.Pipeline): +class HNASLikePipeline(neps.space.neps_spaces.parameters.Pipeline): """Based on the `hierarchical+shared` variant (cell block is shared everywhere). Across _CONVBLOCK items, _ACT and _CONV also shared. Only the _NORM changes. @@ -18,181 +19,193 @@ class HNASLikePipeline(neps_space.Pipeline): # Adding `PReLU` with a float hyperparameter `init` # Note that the sampled `_prelu_init_value` will be shared across all `_PRELU` uses, # since no `Resampled` was requested for it - _prelu_init_value = neps_space.Float(min_value=0.1, max_value=0.9) - _PRELU = neps_space.Operation( + _prelu_init_value = neps.space.neps_spaces.parameters.Float( + min_value=0.1, max_value=0.9 + ) + _PRELU = neps.space.neps_spaces.parameters.Operation( operator="ACT prelu", kwargs={"init": _prelu_init_value}, ) # ------------------------------------------------------ # Added `_PRELU` to the possible `_ACT` choices - _ACT = neps_space.Categorical( + _ACT = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation(operator="ACT relu"), - neps_space.Operation(operator="ACT hardswish"), - neps_space.Operation(operator="ACT mish"), + neps.space.neps_spaces.parameters.Operation(operator="ACT relu"), + neps.space.neps_spaces.parameters.Operation(operator="ACT hardswish"), + neps.space.neps_spaces.parameters.Operation(operator="ACT mish"), _PRELU, ), ) - _CONV = neps_space.Categorical( + _CONV = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation(operator="CONV conv1x1"), - neps_space.Operation(operator="CONV conv3x3"), - neps_space.Operation(operator="CONV dconv3x3"), + neps.space.neps_spaces.parameters.Operation(operator="CONV conv1x1"), + neps.space.neps_spaces.parameters.Operation(operator="CONV conv3x3"), + neps.space.neps_spaces.parameters.Operation(operator="CONV dconv3x3"), ), ) - _NORM = neps_space.Categorical( + _NORM = neps.space.neps_spaces.parameters.Categorical( choices=( - 
neps_space.Operation(operator="NORM batch"), - neps_space.Operation(operator="NORM instance"), - neps_space.Operation(operator="NORM layer"), + neps.space.neps_spaces.parameters.Operation(operator="NORM batch"), + neps.space.neps_spaces.parameters.Operation(operator="NORM instance"), + neps.space.neps_spaces.parameters.Operation(operator="NORM layer"), ), ) - _CONVBLOCK = neps_space.Operation( + _CONVBLOCK = neps.space.neps_spaces.parameters.Operation( operator="CONVBLOCK Sequential3", args=( _ACT, _CONV, - neps_space.Resampled(_NORM), + neps.space.neps_spaces.parameters.Resampled(_NORM), ), ) - _CONVBLOCK_FULL = neps_space.Operation( + _CONVBLOCK_FULL = neps.space.neps_spaces.parameters.Operation( operator="OPS Sequential1", - args=(neps_space.Resampled(_CONVBLOCK),), + args=(neps.space.neps_spaces.parameters.Resampled(_CONVBLOCK),), ) - _OP = neps_space.Categorical( + _OP = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation(operator="OPS zero"), - neps_space.Operation(operator="OPS id"), - neps_space.Operation(operator="OPS avg_pool"), - neps_space.Resampled(_CONVBLOCK_FULL), + neps.space.neps_spaces.parameters.Operation(operator="OPS zero"), + neps.space.neps_spaces.parameters.Operation(operator="OPS id"), + neps.space.neps_spaces.parameters.Operation(operator="OPS avg_pool"), + neps.space.neps_spaces.parameters.Resampled(_CONVBLOCK_FULL), ), ) - CL = neps_space.Operation( + CL = neps.space.neps_spaces.parameters.Operation( operator="CELL Cell", args=( - neps_space.Resampled(_OP), - neps_space.Resampled(_OP), - neps_space.Resampled(_OP), - neps_space.Resampled(_OP), - neps_space.Resampled(_OP), - neps_space.Resampled(_OP), + neps.space.neps_spaces.parameters.Resampled(_OP), + neps.space.neps_spaces.parameters.Resampled(_OP), + neps.space.neps_spaces.parameters.Resampled(_OP), + neps.space.neps_spaces.parameters.Resampled(_OP), + neps.space.neps_spaces.parameters.Resampled(_OP), + neps.space.neps_spaces.parameters.Resampled(_OP), ), ) 
- _C = neps_space.Categorical( + _C = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation(operator="C Sequential2", args=(CL, CL)), - neps_space.Operation(operator="C Sequential3", args=(CL, CL, CL)), - neps_space.Operation(operator="C Residual2", args=(CL, CL, CL)), + neps.space.neps_spaces.parameters.Operation( + operator="C Sequential2", args=(CL, CL) + ), + neps.space.neps_spaces.parameters.Operation( + operator="C Sequential3", args=(CL, CL, CL) + ), + neps.space.neps_spaces.parameters.Operation( + operator="C Residual2", args=(CL, CL, CL) + ), ), ) - _RESBLOCK = neps_space.Operation(operator="resBlock") - _DOWN = neps_space.Categorical( + _RESBLOCK = neps.space.neps_spaces.parameters.Operation(operator="resBlock") + _DOWN = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation(operator="DOWN Sequential2", args=(CL, _RESBLOCK)), - neps_space.Operation(operator="DOWN Sequential3", args=(CL, CL, _RESBLOCK)), - neps_space.Operation( + neps.space.neps_spaces.parameters.Operation( + operator="DOWN Sequential2", args=(CL, _RESBLOCK) + ), + neps.space.neps_spaces.parameters.Operation( + operator="DOWN Sequential3", args=(CL, CL, _RESBLOCK) + ), + neps.space.neps_spaces.parameters.Operation( operator="DOWN Residual2", args=(CL, _RESBLOCK, _RESBLOCK) ), ), ) - _D0 = neps_space.Categorical( + _D0 = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation( + neps.space.neps_spaces.parameters.Operation( operator="D0 Sequential3", args=( - neps_space.Resampled(_C), - neps_space.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_C), CL, ), ), - neps_space.Operation( + neps.space.neps_spaces.parameters.Operation( operator="D0 Sequential4", args=( - neps_space.Resampled(_C), - neps_space.Resampled(_C), - neps_space.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_C), + 
neps.space.neps_spaces.parameters.Resampled(_C), CL, ), ), - neps_space.Operation( + neps.space.neps_spaces.parameters.Operation( operator="D0 Residual3", args=( - neps_space.Resampled(_C), - neps_space.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_C), CL, CL, ), ), ), ) - _D1 = neps_space.Categorical( + _D1 = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation( + neps.space.neps_spaces.parameters.Operation( operator="D1 Sequential3", args=( - neps_space.Resampled(_C), - neps_space.Resampled(_C), - neps_space.Resampled(_DOWN), + neps.space.neps_spaces.parameters.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_DOWN), ), ), - neps_space.Operation( + neps.space.neps_spaces.parameters.Operation( operator="D1 Sequential4", args=( - neps_space.Resampled(_C), - neps_space.Resampled(_C), - neps_space.Resampled(_C), - neps_space.Resampled(_DOWN), + neps.space.neps_spaces.parameters.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_DOWN), ), ), - neps_space.Operation( + neps.space.neps_spaces.parameters.Operation( operator="D1 Residual3", args=( - neps_space.Resampled(_C), - neps_space.Resampled(_C), - neps_space.Resampled(_DOWN), - neps_space.Resampled(_DOWN), + neps.space.neps_spaces.parameters.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_C), + neps.space.neps_spaces.parameters.Resampled(_DOWN), + neps.space.neps_spaces.parameters.Resampled(_DOWN), ), ), ), ) - _D2 = neps_space.Categorical( + _D2 = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation( + neps.space.neps_spaces.parameters.Operation( operator="D2 Sequential3", args=( - neps_space.Resampled(_D1), - neps_space.Resampled(_D1), - neps_space.Resampled(_D0), + 
neps.space.neps_spaces.parameters.Resampled(_D1), + neps.space.neps_spaces.parameters.Resampled(_D1), + neps.space.neps_spaces.parameters.Resampled(_D0), ), ), - neps_space.Operation( + neps.space.neps_spaces.parameters.Operation( operator="D2 Sequential3", args=( - neps_space.Resampled(_D0), - neps_space.Resampled(_D1), - neps_space.Resampled(_D1), + neps.space.neps_spaces.parameters.Resampled(_D0), + neps.space.neps_spaces.parameters.Resampled(_D1), + neps.space.neps_spaces.parameters.Resampled(_D1), ), ), - neps_space.Operation( + neps.space.neps_spaces.parameters.Operation( operator="D2 Sequential4", args=( - neps_space.Resampled(_D1), - neps_space.Resampled(_D1), - neps_space.Resampled(_D0), - neps_space.Resampled(_D0), + neps.space.neps_spaces.parameters.Resampled(_D1), + neps.space.neps_spaces.parameters.Resampled(_D1), + neps.space.neps_spaces.parameters.Resampled(_D0), + neps.space.neps_spaces.parameters.Resampled(_D0), ), ), ), ) - ARCH: neps_space.Operation = _D2 + ARCH: neps.space.neps_spaces.parameters.Operation = _D2 @pytest.mark.repeat(500) diff --git a/tests/test_neps_space/test_search_space__nos_like.py b/tests/test_neps_space/test_search_space__nos_like.py index a650837ce..8321802ce 100644 --- a/tests/test_neps_space/test_search_space__nos_like.py +++ b/tests/test_neps_space/test_search_space__nos_like.py @@ -2,107 +2,111 @@ import pytest +import neps.space.neps_spaces.parameters from neps.space.neps_spaces import config_string, neps_space -class NosBench(neps_space.Pipeline): - _UNARY_FUN = neps_space.Categorical( +class NosBench(neps.space.neps_spaces.parameters.Pipeline): + _UNARY_FUN = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation(operator="Square"), - neps_space.Operation(operator="Exp"), - neps_space.Operation(operator="Log"), + neps.space.neps_spaces.parameters.Operation(operator="Square"), + neps.space.neps_spaces.parameters.Operation(operator="Exp"), + 
neps.space.neps_spaces.parameters.Operation(operator="Log"), ) ) - _BINARY_FUN = neps_space.Categorical( + _BINARY_FUN = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation(operator="Add"), - neps_space.Operation(operator="Sub"), - neps_space.Operation(operator="Mul"), + neps.space.neps_spaces.parameters.Operation(operator="Add"), + neps.space.neps_spaces.parameters.Operation(operator="Sub"), + neps.space.neps_spaces.parameters.Operation(operator="Mul"), ) ) - _TERNARY_FUN = neps_space.Categorical( + _TERNARY_FUN = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation(operator="Interpolate"), - neps_space.Operation(operator="Bias_Correct"), + neps.space.neps_spaces.parameters.Operation(operator="Interpolate"), + neps.space.neps_spaces.parameters.Operation(operator="Bias_Correct"), ) ) - _PARAMS = neps_space.Categorical( + _PARAMS = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation(operator="Params"), - neps_space.Operation(operator="Gradient"), - neps_space.Operation(operator="Opt_Step"), + neps.space.neps_spaces.parameters.Operation(operator="Params"), + neps.space.neps_spaces.parameters.Operation(operator="Gradient"), + neps.space.neps_spaces.parameters.Operation(operator="Opt_Step"), ) ) - _CONST = neps_space.Integer(3, 8) - _VAR = neps_space.Integer(9, 19) + _CONST = neps.space.neps_spaces.parameters.Integer(3, 8) + _VAR = neps.space.neps_spaces.parameters.Integer(9, 19) - _POINTER = neps_space.Categorical( + _POINTER = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Resampled(_PARAMS), - neps_space.Resampled(_CONST), - neps_space.Resampled(_VAR), + neps.space.neps_spaces.parameters.Resampled(_PARAMS), + neps.space.neps_spaces.parameters.Resampled(_CONST), + neps.space.neps_spaces.parameters.Resampled(_VAR), ), ) - _UNARY = neps_space.Operation( + _UNARY = neps.space.neps_spaces.parameters.Operation( operator="Unary", args=( - 
neps_space.Resampled(_UNARY_FUN), - neps_space.Resampled(_POINTER), + neps.space.neps_spaces.parameters.Resampled(_UNARY_FUN), + neps.space.neps_spaces.parameters.Resampled(_POINTER), ), ) - _BINARY = neps_space.Operation( + _BINARY = neps.space.neps_spaces.parameters.Operation( operator="Binary", args=( - neps_space.Resampled(_BINARY_FUN), - neps_space.Resampled(_POINTER), - neps_space.Resampled(_POINTER), + neps.space.neps_spaces.parameters.Resampled(_BINARY_FUN), + neps.space.neps_spaces.parameters.Resampled(_POINTER), + neps.space.neps_spaces.parameters.Resampled(_POINTER), ), ) - _TERNARY = neps_space.Operation( + _TERNARY = neps.space.neps_spaces.parameters.Operation( operator="Ternary", args=( - neps_space.Resampled(_TERNARY_FUN), - neps_space.Resampled(_POINTER), - neps_space.Resampled(_POINTER), - neps_space.Resampled(_POINTER), + neps.space.neps_spaces.parameters.Resampled(_TERNARY_FUN), + neps.space.neps_spaces.parameters.Resampled(_POINTER), + neps.space.neps_spaces.parameters.Resampled(_POINTER), + neps.space.neps_spaces.parameters.Resampled(_POINTER), ), ) - _F_ARGS = neps_space.Categorical( + _F_ARGS = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Resampled(_UNARY), - neps_space.Resampled(_BINARY), - neps_space.Resampled(_TERNARY), + neps.space.neps_spaces.parameters.Resampled(_UNARY), + neps.space.neps_spaces.parameters.Resampled(_BINARY), + neps.space.neps_spaces.parameters.Resampled(_TERNARY), ), ) - _F = neps_space.Operation( + _F = neps.space.neps_spaces.parameters.Operation( operator="Function", - args=(neps_space.Resampled(_F_ARGS),), - kwargs={"var": neps_space.Resampled(_VAR)}, + args=(neps.space.neps_spaces.parameters.Resampled(_F_ARGS),), + kwargs={"var": neps.space.neps_spaces.parameters.Resampled(_VAR)}, ) - _L_ARGS = neps_space.Categorical( + _L_ARGS = neps.space.neps_spaces.parameters.Categorical( choices=( - (neps_space.Resampled(_F),), - (neps_space.Resampled(_F), neps_space.Resampled("_L")), + 
(neps.space.neps_spaces.parameters.Resampled(_F),), + ( + neps.space.neps_spaces.parameters.Resampled(_F), + neps.space.neps_spaces.parameters.Resampled("_L"), + ), ), ) - _L = neps_space.Operation( + _L = neps.space.neps_spaces.parameters.Operation( operator="Line_operator", - args=neps_space.Resampled(_L_ARGS), + args=neps.space.neps_spaces.parameters.Resampled(_L_ARGS), ) - P = neps_space.Operation( + P = neps.space.neps_spaces.parameters.Operation( operator="Program", - args=(neps_space.Resampled(_L),), + args=(neps.space.neps_spaces.parameters.Resampled(_L),), ) diff --git a/tests/test_neps_space/test_search_space__recursion.py b/tests/test_neps_space/test_search_space__recursion.py index 07f696df6..3ab7977be 100644 --- a/tests/test_neps_space/test_search_space__recursion.py +++ b/tests/test_neps_space/test_search_space__recursion.py @@ -2,6 +2,7 @@ from collections.abc import Callable, Sequence +import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space @@ -30,12 +31,12 @@ def __call__(self, values: Sequence[float]) -> float: return sum(values) -class DemoRecursiveOperationSpace(neps_space.Pipeline): +class DemoRecursiveOperationSpace(neps.space.neps_spaces.parameters.Pipeline): # The way to sample `factor` values - _factor = neps_space.Float(min_value=0, max_value=1) + _factor = neps.space.neps_spaces.parameters.Float(min_value=0, max_value=1) # Sum - _sum = neps_space.Operation(operator=Sum) + _sum = neps.space.neps_spaces.parameters.Operation(operator=Sum) # Model # Can recursively request itself as an arg. @@ -46,12 +47,12 @@ class DemoRecursiveOperationSpace(neps_space.Pipeline): # ... 
# If we want the `factor` values to be different, # we just request a resample for them - _inner_function = neps_space.Categorical( - choices=(_sum, neps_space.Resampled("model")), + _inner_function = neps.space.neps_spaces.parameters.Categorical( + choices=(_sum, neps.space.neps_spaces.parameters.Resampled("model")), ) - model = neps_space.Operation( + model = neps.space.neps_spaces.parameters.Operation( operator=Model, - args=(neps_space.Resampled(_inner_function),), + args=(neps.space.neps_spaces.parameters.Resampled(_inner_function),), kwargs={"factor": _factor}, ) diff --git a/tests/test_neps_space/test_search_space__resampled.py b/tests/test_neps_space/test_search_space__resampled.py index 92d8da1fb..b6106f46c 100644 --- a/tests/test_neps_space/test_search_space__resampled.py +++ b/tests/test_neps_space/test_search_space__resampled.py @@ -2,128 +2,135 @@ import pytest +import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space -class ActPipelineSimpleFloat(neps_space.Pipeline): - prelu_init_value = neps_space.Float( +class ActPipelineSimpleFloat(neps.space.neps_spaces.parameters.Pipeline): + prelu_init_value = neps.space.neps_spaces.parameters.Float( min_value=0, max_value=1000000, log=False, prior=0.25, - prior_confidence=neps_space.ConfidenceLevel.LOW, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, ) - prelu_shared1 = neps_space.Operation( + prelu_shared1 = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_shared2 = neps_space.Operation( + prelu_shared2 = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_own_clone1 = neps_space.Operation( + prelu_own_clone1 = neps.space.neps_spaces.parameters.Operation( operator="prelu", - kwargs={"init": neps_space.Resampled(prelu_init_value)}, + kwargs={"init": neps.space.neps_spaces.parameters.Resampled(prelu_init_value)}, ) - prelu_own_clone2 = 
neps_space.Operation( + prelu_own_clone2 = neps.space.neps_spaces.parameters.Operation( operator="prelu", - kwargs={"init": neps_space.Resampled(prelu_init_value)}, + kwargs={"init": neps.space.neps_spaces.parameters.Resampled(prelu_init_value)}, ) - _prelu_init_resampled = neps_space.Resampled(prelu_init_value) - prelu_common_clone1 = neps_space.Operation( + _prelu_init_resampled = neps.space.neps_spaces.parameters.Resampled(prelu_init_value) + prelu_common_clone1 = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) - prelu_common_clone2 = neps_space.Operation( + prelu_common_clone2 = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) -class ActPipelineComplexInteger(neps_space.Pipeline): - prelu_init_value = neps_space.Integer(min_value=0, max_value=1000000) +class ActPipelineComplexInteger(neps.space.neps_spaces.parameters.Pipeline): + prelu_init_value = neps.space.neps_spaces.parameters.Integer( + min_value=0, max_value=1000000 + ) - prelu_shared1 = neps_space.Operation( + prelu_shared1 = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_shared2 = neps_space.Operation( + prelu_shared2 = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_own_clone1 = neps_space.Operation( + prelu_own_clone1 = neps.space.neps_spaces.parameters.Operation( operator="prelu", - kwargs={"init": neps_space.Resampled(prelu_init_value)}, + kwargs={"init": neps.space.neps_spaces.parameters.Resampled(prelu_init_value)}, ) - prelu_own_clone2 = neps_space.Operation( + prelu_own_clone2 = neps.space.neps_spaces.parameters.Operation( operator="prelu", - kwargs={"init": neps_space.Resampled(prelu_init_value)}, + kwargs={"init": neps.space.neps_spaces.parameters.Resampled(prelu_init_value)}, ) - _prelu_init_resampled = neps_space.Resampled(prelu_init_value) - 
prelu_common_clone1 = neps_space.Operation( + _prelu_init_resampled = neps.space.neps_spaces.parameters.Resampled(prelu_init_value) + prelu_common_clone1 = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) - prelu_common_clone2 = neps_space.Operation( + prelu_common_clone2 = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) - act: neps_space.Operation = neps_space.Operation( - operator="sequential6", - args=( - prelu_shared1, - prelu_shared2, - prelu_own_clone1, - prelu_own_clone2, - prelu_common_clone1, - prelu_common_clone2, - ), - kwargs={ - "prelu_shared": prelu_shared1, - "prelu_own_clone": prelu_own_clone1, - "prelu_common_clone": prelu_common_clone1, - "resampled_hp_value": neps_space.Resampled(prelu_init_value), - }, + act: neps.space.neps_spaces.parameters.Operation = ( + neps.space.neps_spaces.parameters.Operation( + operator="sequential6", + args=( + prelu_shared1, + prelu_shared2, + prelu_own_clone1, + prelu_own_clone2, + prelu_common_clone1, + prelu_common_clone2, + ), + kwargs={ + "prelu_shared": prelu_shared1, + "prelu_own_clone": prelu_own_clone1, + "prelu_common_clone": prelu_common_clone1, + "resampled_hp_value": neps.space.neps_spaces.parameters.Resampled( + prelu_init_value + ), + }, + ) ) -class CellPipelineCategorical(neps_space.Pipeline): - conv_block = neps_space.Categorical( +class CellPipelineCategorical(neps.space.neps_spaces.parameters.Pipeline): + conv_block = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Operation(operator="conv1"), - neps_space.Operation(operator="conv2"), + neps.space.neps_spaces.parameters.Operation(operator="conv1"), + neps.space.neps_spaces.parameters.Operation(operator="conv2"), ), ) - op1 = neps_space.Categorical( + op1 = neps.space.neps_spaces.parameters.Categorical( choices=( conv_block, - neps_space.Operation("op1"), + neps.space.neps_spaces.parameters.Operation("op1"), ), ) - 
op2 = neps_space.Categorical( + op2 = neps.space.neps_spaces.parameters.Categorical( choices=( - neps_space.Resampled(conv_block), - neps_space.Operation("op2"), + neps.space.neps_spaces.parameters.Resampled(conv_block), + neps.space.neps_spaces.parameters.Operation("op2"), ), ) - _resampled_op1 = neps_space.Resampled(op1) - cell = neps_space.Operation( + _resampled_op1 = neps.space.neps_spaces.parameters.Resampled(op1) + cell = neps.space.neps_spaces.parameters.Operation( operator="cell", args=( op1, op2, _resampled_op1, - neps_space.Resampled(op2), + neps.space.neps_spaces.parameters.Resampled(op2), _resampled_op1, - neps_space.Resampled(op2), + neps.space.neps_spaces.parameters.Resampled(op2), ), ) @@ -261,8 +268,8 @@ def test_resampled_categorical(): assert op1 is not pipeline.op1 assert op2 is not pipeline.op2 - assert isinstance(op1, neps_space.Operation) - assert isinstance(op2, neps_space.Operation) + assert isinstance(op1, neps.space.neps_spaces.parameters.Operation) + assert isinstance(op2, neps.space.neps_spaces.parameters.Operation) assert (op1 is conv_block) or (op1.operator == "op1") assert op2.operator in ("conv1", "conv2", "op2") diff --git a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index 2cc59a23b..6e2d5e71a 100644 --- a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -2,36 +2,43 @@ import pytest +import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.space.neps_spaces import neps_space -class ActPipelineSimple(neps_space.Pipeline): - prelu = neps_space.Operation( +class ActPipelineSimple(neps.space.neps_spaces.parameters.Pipeline): + prelu = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": 0.1}, ) - relu = neps_space.Operation(operator="relu") + relu = 
neps.space.neps_spaces.parameters.Operation(operator="relu") - act: neps_space.Operation = neps_space.Categorical( - choices=(prelu, relu), + act: neps.space.neps_spaces.parameters.Operation = ( + neps.space.neps_spaces.parameters.Categorical( + choices=(prelu, relu), + ) ) -class ActPipelineComplex(neps_space.Pipeline): - prelu_init_value: float = neps_space.Float(min_value=0.1, max_value=0.9) - prelu = neps_space.Operation( +class ActPipelineComplex(neps.space.neps_spaces.parameters.Pipeline): + prelu_init_value: float = neps.space.neps_spaces.parameters.Float( + min_value=0.1, max_value=0.9 + ) + prelu = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - act: neps_space.Operation = neps_space.Categorical( - choices=(prelu,), + act: neps.space.neps_spaces.parameters.Operation = ( + neps.space.neps_spaces.parameters.Categorical( + choices=(prelu,), + ) ) -class FixedPipeline(neps_space.Pipeline): +class FixedPipeline(neps.space.neps_spaces.parameters.Pipeline): prelu_init_value: float = 0.5 - prelu = neps_space.Operation( + prelu = neps.space.neps_spaces.parameters.Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) @@ -41,74 +48,80 @@ class FixedPipeline(neps_space.Pipeline): _conv_choices_low = ("conv1x1", "conv3x3") _conv_choices_high = ("conv5x5", "conv9x9") _conv_choices_prior_confidence_choices = ( - neps_space.ConfidenceLevel.LOW, - neps_space.ConfidenceLevel.MEDIUM, - neps_space.ConfidenceLevel.HIGH, + neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, + neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, ) -class ConvPipeline(neps_space.Pipeline): - conv_choices_prior_index: int = neps_space.Integer( +class ConvPipeline(neps.space.neps_spaces.parameters.Pipeline): + conv_choices_prior_index: int = neps.space.neps_spaces.parameters.Integer( min_value=0, max_value=1, log=False, prior=0, - 
prior_confidence=neps_space.ConfidenceLevel.LOW, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, ) - conv_choices_prior_confidence: neps_space.ConfidenceLevel = neps_space.Categorical( - choices=_conv_choices_prior_confidence_choices, - prior_index=1, - prior_confidence=neps_space.ConfidenceLevel.LOW, + conv_choices_prior_confidence: neps.space.neps_spaces.parameters.ConfidenceLevel = ( + neps.space.neps_spaces.parameters.Categorical( + choices=_conv_choices_prior_confidence_choices, + prior_index=1, + prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, + ) ) - conv_choices: tuple[str, ...] = neps_space.Categorical( + conv_choices: tuple[str, ...] = neps.space.neps_spaces.parameters.Categorical( choices=(_conv_choices_low, _conv_choices_high), prior_index=conv_choices_prior_index, prior_confidence=conv_choices_prior_confidence, ) - _conv1: str = neps_space.Categorical( + _conv1: str = neps.space.neps_spaces.parameters.Categorical( choices=conv_choices, ) - _conv2: str = neps_space.Categorical( + _conv2: str = neps.space.neps_spaces.parameters.Categorical( choices=conv_choices, ) - conv_block: neps_space.Operation = neps_space.Categorical( - choices=( - neps_space.Operation( - operator="sequential3", - args=[_conv1, _conv2, _conv1], + conv_block: neps.space.neps_spaces.parameters.Operation = ( + neps.space.neps_spaces.parameters.Categorical( + choices=( + neps.space.neps_spaces.parameters.Operation( + operator="sequential3", + args=[_conv1, _conv2, _conv1], + ), ), - ), + ) ) -class CellPipeline(neps_space.Pipeline): - _act = neps_space.Operation(operator="relu") - _conv = neps_space.Operation(operator="conv3x3") - _norm = neps_space.Operation(operator="batch") +class CellPipeline(neps.space.neps_spaces.parameters.Pipeline): + _act = neps.space.neps_spaces.parameters.Operation(operator="relu") + _conv = neps.space.neps_spaces.parameters.Operation(operator="conv3x3") + _norm = 
neps.space.neps_spaces.parameters.Operation(operator="batch") - conv_block = neps_space.Operation(operator="sequential3", args=(_act, _conv, _norm)) + conv_block = neps.space.neps_spaces.parameters.Operation( + operator="sequential3", args=(_act, _conv, _norm) + ) - op1 = neps_space.Categorical( + op1 = neps.space.neps_spaces.parameters.Categorical( choices=( conv_block, - neps_space.Operation(operator="zero"), - neps_space.Operation(operator="avg_pool"), + neps.space.neps_spaces.parameters.Operation(operator="zero"), + neps.space.neps_spaces.parameters.Operation(operator="avg_pool"), ), ) - op2 = neps_space.Categorical( + op2 = neps.space.neps_spaces.parameters.Categorical( choices=( conv_block, - neps_space.Operation(operator="zero"), - neps_space.Operation(operator="avg_pool"), + neps.space.neps_spaces.parameters.Operation(operator="zero"), + neps.space.neps_spaces.parameters.Operation(operator="avg_pool"), ), ) _some_int = 2 - _some_float = neps_space.Float(min_value=0.5, max_value=0.5) + _some_float = neps.space.neps_spaces.parameters.Float(min_value=0.5, max_value=0.5) - cell = neps_space.Operation( + cell = neps.space.neps_spaces.parameters.Operation( operator="cell", args=(op1, op2, op1, op2, op1, op2), kwargs={"float_hp": _some_float, "int_hp": _some_int}, @@ -296,8 +309,8 @@ def test_shared_complex(): op2 = resolved_pipeline.op2 assert op1 is not pipeline.op1 assert op2 is not pipeline.op2 - assert isinstance(op1, neps_space.Operation) - assert isinstance(op2, neps_space.Operation) + assert isinstance(op1, neps.space.neps_spaces.parameters.Operation) + assert isinstance(op2, neps.space.neps_spaces.parameters.Operation) if op1 is op2: assert op1 is conv_block diff --git a/tests/test_neps_space/utils.py b/tests/test_neps_space/utils.py index 9710bd0f9..837379925 100644 --- a/tests/test_neps_space/utils.py +++ b/tests/test_neps_space/utils.py @@ -2,12 +2,16 @@ from collections.abc import Callable +import neps.space.neps_spaces.parameters from 
neps.space.neps_spaces import neps_space def generate_possible_config_strings( - pipeline: neps_space.Pipeline, - resolved_pipeline_attr_getter: Callable[[neps_space.Pipeline], neps_space.Operation], + pipeline: neps.space.neps_spaces.parameters.Pipeline, + resolved_pipeline_attr_getter: Callable[ + [neps.space.neps_spaces.parameters.Pipeline], + neps.space.neps_spaces.parameters.Operation, + ], num_resolutions: int = 50_000, ): result = set() From e9294f709d5bee2f24a12496490604d7b5824e9c Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 2 Jul 2025 22:51:18 +0200 Subject: [PATCH 012/156] Refactor parameter imports and class definitions in NEPS space tests - Updated imports in test files to directly import necessary classes from neps.space.neps_spaces.parameters. - Refactored class definitions to inherit directly from Pipeline and other relevant classes. - Replaced instances of parameter class references with direct class references for improved readability. - Ensured consistency across test files by applying similar refactoring patterns. 
--- .../test_neps_space/test_domain__centering.py | 62 +++--- .../test_neps_space/test_neps_integration.py | 113 ++++++----- ...st_neps_integration_priorband__max_cost.py | 26 ++- ...t_neps_integration_priorband__max_evals.py | 26 ++- .../test_search_space__fidelity.py | 28 ++- .../test_search_space__grammar_like.py | 172 ++++++++-------- .../test_search_space__hnas_like.py | 186 +++++++++--------- .../test_search_space__nos_like.py | 106 +++++----- .../test_search_space__recursion.py | 22 ++- .../test_search_space__resampled.py | 128 ++++++------ .../test_search_space__reuse_arch_elements.py | 115 ++++++----- 11 files changed, 506 insertions(+), 478 deletions(-) diff --git a/tests/test_neps_space/test_domain__centering.py b/tests/test_neps_space/test_domain__centering.py index 305487dda..90ac6719b 100644 --- a/tests/test_neps_space/test_domain__centering.py +++ b/tests/test_neps_space/test_domain__centering.py @@ -2,15 +2,15 @@ import pytest -import neps.space.neps_spaces.parameters +from neps.space.neps_spaces.parameters import Categorical, ConfidenceLevel, Float, Integer @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max"), [ - (neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, (50, 10, 90)), - (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (50, 25, 75)), - (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (50, 40, 60)), + (ConfidenceLevel.LOW, (50, 10, 90)), + (ConfidenceLevel.MEDIUM, (50, 25, 75)), + (ConfidenceLevel.HIGH, (50, 40, 60)), ], ) def test_centering_integer( @@ -23,11 +23,11 @@ def test_centering_integer( int_prior = 50 - int1 = neps.space.neps_spaces.parameters.Integer( + int1 = Integer( min_value=1, max_value=100, ) - int2 = neps.space.neps_spaces.parameters.Integer( + int2 = Integer( min_value=1, max_value=100, prior=int_prior, @@ -60,11 +60,11 @@ def test_centering_integer( ("confidence_level", "expected_prior_min_max"), [ ( - neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, + 
ConfidenceLevel.LOW, (50.0, 10.399999999999999, 89.6), ), - (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (50.0, 25.25, 74.75)), - (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (50.0, 40.1, 59.9)), + (ConfidenceLevel.MEDIUM, (50.0, 25.25, 74.75)), + (ConfidenceLevel.HIGH, (50.0, 40.1, 59.9)), ], ) def test_centering_float( @@ -77,11 +77,11 @@ def test_centering_float( float_prior = 50.0 - float1 = neps.space.neps_spaces.parameters.Float( + float1 = Float( min_value=1.0, max_value=100.0, ) - float2 = neps.space.neps_spaces.parameters.Float( + float2 = Float( min_value=1.0, max_value=100.0, prior=float_prior, @@ -113,9 +113,9 @@ def test_centering_float( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max_value"), [ - (neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, (40, 0, 80, 50)), - (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (25, 0, 50, 50)), - (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (10, 0, 20, 50)), + (ConfidenceLevel.LOW, (40, 0, 80, 50)), + (ConfidenceLevel.MEDIUM, (25, 0, 50, 50)), + (ConfidenceLevel.HIGH, (10, 0, 20, 50)), ], ) def test_centering_categorical( @@ -128,10 +128,10 @@ def test_centering_categorical( categorical_prior_index_original = 49 - categorical1 = neps.space.neps_spaces.parameters.Categorical( + categorical1 = Categorical( choices=tuple(range(1, 101)), ) - categorical2 = neps.space.neps_spaces.parameters.Categorical( + categorical2 = Categorical( choices=tuple(range(1, 101)), prior_index=categorical_prior_index_original, prior_confidence=confidence_level, @@ -170,22 +170,22 @@ def test_centering_categorical( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max"), [ - (neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, (10, 5, 13)), - (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (10, 7, 13)), - (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (10, 8, 12)), + (ConfidenceLevel.LOW, (10, 5, 13)), + 
(ConfidenceLevel.MEDIUM, (10, 7, 13)), + (ConfidenceLevel.HIGH, (10, 8, 12)), ], ) def test_centering_stranger_ranges_integer( confidence_level, expected_prior_min_max, ): - int1 = neps.space.neps_spaces.parameters.Integer( + int1 = Integer( min_value=1, max_value=13, ) int1_centered = int1.centered_around(10, confidence_level) - int2 = neps.space.neps_spaces.parameters.Integer( + int2 = Integer( min_value=1, max_value=13, prior=10, @@ -212,24 +212,24 @@ def test_centering_stranger_ranges_integer( ("confidence_level", "expected_prior_min_max"), [ ( - neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, + ConfidenceLevel.LOW, (0.5, 0.09999999999999998, 0.9), ), - (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (0.5, 0.25, 0.75)), - (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (0.5, 0.4, 0.6)), + (ConfidenceLevel.MEDIUM, (0.5, 0.25, 0.75)), + (ConfidenceLevel.HIGH, (0.5, 0.4, 0.6)), ], ) def test_centering_stranger_ranges_float( confidence_level, expected_prior_min_max, ): - float1 = neps.space.neps_spaces.parameters.Float( + float1 = Float( min_value=0.0, max_value=1.0, ) float1_centered = float1.centered_around(0.5, confidence_level) - float2 = neps.space.neps_spaces.parameters.Float( + float2 = Float( min_value=0.0, max_value=1.0, prior=0.5, @@ -255,21 +255,21 @@ def test_centering_stranger_ranges_float( @pytest.mark.parametrize( ("confidence_level", "expected_prior_min_max_value"), [ - (neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, (2, 0, 5, 2)), - (neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, (2, 0, 4, 2)), - (neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, (1, 0, 2, 2)), + (ConfidenceLevel.LOW, (2, 0, 5, 2)), + (ConfidenceLevel.MEDIUM, (2, 0, 4, 2)), + (ConfidenceLevel.HIGH, (1, 0, 2, 2)), ], ) def test_centering_stranger_ranges_categorical( confidence_level, expected_prior_min_max_value, ): - categorical1 = neps.space.neps_spaces.parameters.Categorical( + categorical1 = Categorical( 
choices=tuple(range(7)), ) categorical1_centered = categorical1.centered_around(2, confidence_level) - categorical2 = neps.space.neps_spaces.parameters.Categorical( + categorical2 = Categorical( choices=tuple(range(7)), prior_index=2, prior_confidence=confidence_level, diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index f609c1a18..3e7ed5cff 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -6,8 +6,17 @@ import neps import neps.space.neps_spaces.optimizers.algorithms -import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces.parameters import ( + Categorical, + ConfidenceLevel, + Fidelity, + Float, + Integer, + Operation, + Pipeline, + Resampled, +) def hyperparameter_pipeline_to_optimize( @@ -29,117 +38,117 @@ def hyperparameter_pipeline_to_optimize( return objective_to_minimize -class DemoHyperparameterSpace(neps.space.neps_spaces.parameters.Pipeline): - float1 = neps.space.neps_spaces.parameters.Float( +class DemoHyperparameterSpace(Pipeline): + float1 = Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - float2 = neps.space.neps_spaces.parameters.Float( + float2 = Float( min_value=-10, max_value=10, prior=0.1, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - categorical = neps.space.neps_spaces.parameters.Categorical( + categorical = Categorical( choices=(0, 1), prior_index=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - integer1 = neps.space.neps_spaces.parameters.Integer( + integer1 = Integer( min_value=0, max_value=1, prior=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + 
prior_confidence=ConfidenceLevel.MEDIUM, ) - integer2 = neps.space.neps_spaces.parameters.Integer( + integer2 = Integer( min_value=1, max_value=1000, prior=10, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) -class DemoHyperparameterWithFidelitySpace(neps.space.neps_spaces.parameters.Pipeline): - float1 = neps.space.neps_spaces.parameters.Float( +class DemoHyperparameterWithFidelitySpace(Pipeline): + float1 = Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - float2 = neps.space.neps_spaces.parameters.Float( + float2 = Float( min_value=-10, max_value=10, prior=0.1, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - categorical = neps.space.neps_spaces.parameters.Categorical( + categorical = Categorical( choices=(0, 1), prior_index=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - integer1 = neps.space.neps_spaces.parameters.Integer( + integer1 = Integer( min_value=0, max_value=1, prior=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - integer2 = neps.space.neps_spaces.parameters.Fidelity( - neps.space.neps_spaces.parameters.Integer( + integer2 = Fidelity( + Integer( min_value=1, max_value=1000, ), ) -class DemoHyperparameterComplexSpace(neps.space.neps_spaces.parameters.Pipeline): - _small_float = neps.space.neps_spaces.parameters.Float( +class DemoHyperparameterComplexSpace(Pipeline): + _small_float = Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - _big_float = neps.space.neps_spaces.parameters.Float( + _big_float = Float( min_value=10, 
max_value=100, prior=20, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - float1 = neps.space.neps_spaces.parameters.Categorical( + float1 = Categorical( choices=( - neps.space.neps_spaces.parameters.Resampled(_small_float), - neps.space.neps_spaces.parameters.Resampled(_big_float), + Resampled(_small_float), + Resampled(_big_float), ), prior_index=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - float2 = neps.space.neps_spaces.parameters.Categorical( + float2 = Categorical( choices=( - neps.space.neps_spaces.parameters.Resampled(_small_float), - neps.space.neps_spaces.parameters.Resampled(_big_float), + Resampled(_small_float), + Resampled(_big_float), float1, ), prior_index=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - categorical = neps.space.neps_spaces.parameters.Categorical( + categorical = Categorical( choices=(0, 1), prior_index=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - integer1 = neps.space.neps_spaces.parameters.Integer( + integer1 = Integer( min_value=0, max_value=1, prior=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - integer2 = neps.space.neps_spaces.parameters.Integer( + integer2 = Integer( min_value=1, max_value=1000, prior=10, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) @@ -275,18 +284,18 @@ def operation_pipeline_to_optimize(model: Model, some_hp: str): return objective_to_minimize -class DemoOperationSpace(neps.space.neps_spaces.parameters.Pipeline): +class DemoOperationSpace(Pipeline): """A demonstration of how to use operations in a search space. 
This space defines a model that can be optimized using different inner functions and a factor. The model can be used to evaluate a set of values and return an objective to minimize. """ # The way to sample `factor` values - _factor = neps.space.neps_spaces.parameters.Float( + _factor = Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) # Sum @@ -294,31 +303,31 @@ class DemoOperationSpace(neps.space.neps_spaces.parameters.Pipeline): # `Sum()` # Could have also been defined using the python `sum` function as # `_sum = space.Operation(operator=lambda: sum)` - _sum = neps.space.neps_spaces.parameters.Operation(operator=Sum) + _sum = Operation(operator=Sum) # MultipliedSum # Will be equivalent to something like # `MultipliedSum(factor=0.2)` - _multiplied_sum = neps.space.neps_spaces.parameters.Operation( + _multiplied_sum = Operation( operator=MultipliedSum, - kwargs={"factor": neps.space.neps_spaces.parameters.Resampled(_factor)}, + kwargs={"factor": Resampled(_factor)}, ) # Model # Will be equivalent to something like one of # `Model(Sum(), factor=0.1)` # `Model(MultipliedSum(factor=0.2), factor=0.1)` - _inner_function = neps.space.neps_spaces.parameters.Categorical( + _inner_function = Categorical( choices=(_sum, _multiplied_sum), ) - model = neps.space.neps_spaces.parameters.Operation( + model = Operation( operator=Model, args=(_inner_function,), - kwargs={"factor": neps.space.neps_spaces.parameters.Resampled(_factor)}, + kwargs={"factor": Resampled(_factor)}, ) # An additional hyperparameter - some_hp = neps.space.neps_spaces.parameters.Categorical( + some_hp = Categorical( choices=("hp1", "hp2"), ) diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index 74a98e8e4..fb7f17592 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py 
+++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -9,8 +9,14 @@ import neps.optimizers.algorithms as old_algorithms import neps.space.neps_spaces.optimizers.algorithms import neps.space.neps_spaces.optimizers.bracket_optimizer as new_bracket_optimizer -import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces.parameters import ( + ConfidenceLevel, + Fidelity, + Float, + Integer, + Pipeline, +) _COSTS = {} @@ -58,28 +64,28 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): } -class DemoHyperparameterWithFidelitySpace(neps.space.neps_spaces.parameters.Pipeline): - float1 = neps.space.neps_spaces.parameters.Float( +class DemoHyperparameterWithFidelitySpace(Pipeline): + float1 = Float( min_value=1, max_value=1000, log=False, prior=600, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - float2 = neps.space.neps_spaces.parameters.Float( + float2 = Float( min_value=-100, max_value=100, prior=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - integer1 = neps.space.neps_spaces.parameters.Integer( + integer1 = Integer( min_value=0, max_value=500, prior=35, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, + prior_confidence=ConfidenceLevel.LOW, ) - fidelity = neps.space.neps_spaces.parameters.Fidelity( - domain=neps.space.neps_spaces.parameters.Integer( + fidelity = Fidelity( + domain=Integer( min_value=1, max_value=100, ), diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index 1f195c79f..08a8ae9b4 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -9,8 +9,14 @@ import neps.optimizers.algorithms as 
old_algorithms import neps.space.neps_spaces.optimizers.algorithms import neps.space.neps_spaces.optimizers.bracket_optimizer as new_bracket_optimizer -import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces.parameters import ( + ConfidenceLevel, + Fidelity, + Float, + Integer, + Pipeline, +) def evaluate_pipeline(float1, float2, integer1, fidelity): @@ -45,28 +51,28 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): } -class DemoHyperparameterWithFidelitySpace(neps.space.neps_spaces.parameters.Pipeline): - float1 = neps.space.neps_spaces.parameters.Float( +class DemoHyperparameterWithFidelitySpace(Pipeline): + float1 = Float( min_value=1, max_value=1000, log=False, prior=600, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - float2 = neps.space.neps_spaces.parameters.Float( + float2 = Float( min_value=-100, max_value=100, prior=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - integer1 = neps.space.neps_spaces.parameters.Integer( + integer1 = Integer( min_value=0, max_value=500, prior=35, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, + prior_confidence=ConfidenceLevel.LOW, ) - fidelity = neps.space.neps_spaces.parameters.Fidelity( - domain=neps.space.neps_spaces.parameters.Integer( + fidelity = Fidelity( + domain=Integer( min_value=1, max_value=100, ), diff --git a/tests/test_neps_space/test_search_space__fidelity.py b/tests/test_neps_space/test_search_space__fidelity.py index 6cee0e657..5b15de704 100644 --- a/tests/test_neps_space/test_search_space__fidelity.py +++ b/tests/test_neps_space/test_search_space__fidelity.py @@ -4,21 +4,27 @@ import pytest -import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces.parameters 
import ( + ConfidenceLevel, + Fidelity, + Float, + Integer, + Pipeline, +) -class DemoHyperparametersWithFidelitySpace(neps.space.neps_spaces.parameters.Pipeline): +class DemoHyperparametersWithFidelitySpace(Pipeline): constant1: int = 42 - float1: float = neps.space.neps_spaces.parameters.Float( + float1 = Float( min_value=0, max_value=1, prior=0.1, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ) - fidelity_integer1: int = neps.space.neps_spaces.parameters.Fidelity( - domain=neps.space.neps_spaces.parameters.Integer( + fidelity_integer1 = Fidelity( + domain=Integer( min_value=1, max_value=1000, ), @@ -31,12 +37,12 @@ def test_fidelity_creation_raises_when_domain_has_prior(): ValueError, match=re.escape("The domain of a Fidelity can not have priors: "), ): - neps.space.neps_spaces.parameters.Fidelity( - domain=neps.space.neps_spaces.parameters.Integer( + Fidelity( + domain=Integer( min_value=1, max_value=1000, prior=10, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, + prior_confidence=ConfidenceLevel.MEDIUM, ), ) @@ -83,7 +89,9 @@ def test_fidelity_resolution_works(): ) assert resolved_pipeline.constant1 == 42 - assert 0.0 <= resolved_pipeline.float1 <= 1.0 + assert ( + 0.0 <= float(str(resolved_pipeline.float1)) <= 1.0 + ) # 0.0 <= resolved_pipeline.float1 <= 1.0 also works, but gives a type warning assert resolved_pipeline.fidelity_integer1 == 10 diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index 9825e1cb6..c846659fd 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -2,160 +2,160 @@ import pytest -import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.space.neps_spaces import config_string, neps_space +from neps.space.neps_spaces.parameters import Categorical, 
Operation, Pipeline, Resampled -class GrammarLike(neps.space.neps_spaces.parameters.Pipeline): - _id = neps.space.neps_spaces.parameters.Operation(operator="Identity") - _three = neps.space.neps_spaces.parameters.Operation(operator="Conv2D-3") - _one = neps.space.neps_spaces.parameters.Operation(operator="Conv2D-1") - _reluconvbn = neps.space.neps_spaces.parameters.Operation(operator="ReLUConvBN") +class GrammarLike(Pipeline): + _id = Operation(operator="Identity") + _three = Operation(operator="Conv2D-3") + _one = Operation(operator="Conv2D-1") + _reluconvbn = Operation(operator="ReLUConvBN") - _O = neps.space.neps_spaces.parameters.Categorical(choices=(_three, _one, _id)) + _O = Categorical(choices=(_three, _one, _id)) - _C0 = neps.space.neps_spaces.parameters.Operation( + _C0 = Operation( operator="Sequential", - args=(neps.space.neps_spaces.parameters.Resampled(_O),), + args=(Resampled(_O),), ) - _C1 = neps.space.neps_spaces.parameters.Operation( + _C1 = Operation( operator="Sequential", args=( - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled("S"), + Resampled(_O), + Resampled("S"), _reluconvbn, ), ) - _C2 = neps.space.neps_spaces.parameters.Operation( + _C2 = Operation( operator="Sequential", args=( - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled("S"), + Resampled(_O), + Resampled("S"), ), ) - _C3 = neps.space.neps_spaces.parameters.Operation( + _C3 = Operation( operator="Sequential", - args=(neps.space.neps_spaces.parameters.Resampled("S"),), + args=(Resampled("S"),), ) - _C = neps.space.neps_spaces.parameters.Categorical( + _C = Categorical( choices=( - neps.space.neps_spaces.parameters.Resampled(_C0), - neps.space.neps_spaces.parameters.Resampled(_C1), - neps.space.neps_spaces.parameters.Resampled(_C2), - neps.space.neps_spaces.parameters.Resampled(_C3), + Resampled(_C0), + Resampled(_C1), + Resampled(_C2), + Resampled(_C3), ), ) - _S0 = 
neps.space.neps_spaces.parameters.Operation( + _S0 = Operation( operator="Sequential", - args=(neps.space.neps_spaces.parameters.Resampled(_C),), + args=(Resampled(_C),), ) - _S1 = neps.space.neps_spaces.parameters.Operation( + _S1 = Operation( operator="Sequential", args=(_reluconvbn,), ) - _S2 = neps.space.neps_spaces.parameters.Operation( + _S2 = Operation( operator="Sequential", - args=(neps.space.neps_spaces.parameters.Resampled("S"),), + args=(Resampled("S"),), ) - _S3 = neps.space.neps_spaces.parameters.Operation( + _S3 = Operation( operator="Sequential", args=( - neps.space.neps_spaces.parameters.Resampled("S"), - neps.space.neps_spaces.parameters.Resampled(_C), + Resampled("S"), + Resampled(_C), ), ) - _S4 = neps.space.neps_spaces.parameters.Operation( + _S4 = Operation( operator="Sequential", args=( - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), ), ) - _S5 = neps.space.neps_spaces.parameters.Operation( + _S5 = Operation( operator="Sequential", args=( - neps.space.neps_spaces.parameters.Resampled("S"), - neps.space.neps_spaces.parameters.Resampled("S"), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), + Resampled("S"), + Resampled("S"), + Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), ), ) - S = neps.space.neps_spaces.parameters.Categorical( + S = Categorical( choices=( - neps.space.neps_spaces.parameters.Resampled(_S0), - neps.space.neps_spaces.parameters.Resampled(_S1), - neps.space.neps_spaces.parameters.Resampled(_S2), - neps.space.neps_spaces.parameters.Resampled(_S3), - 
neps.space.neps_spaces.parameters.Resampled(_S4), - neps.space.neps_spaces.parameters.Resampled(_S5), + Resampled(_S0), + Resampled(_S1), + Resampled(_S2), + Resampled(_S3), + Resampled(_S4), + Resampled(_S5), ), ) -class GrammarLikeAlt(neps.space.neps_spaces.parameters.Pipeline): - _id = neps.space.neps_spaces.parameters.Operation(operator="Identity") - _three = neps.space.neps_spaces.parameters.Operation(operator="Conv2D-3") - _one = neps.space.neps_spaces.parameters.Operation(operator="Conv2D-1") - _reluconvbn = neps.space.neps_spaces.parameters.Operation(operator="ReLUConvBN") +class GrammarLikeAlt(Pipeline): + _id = Operation(operator="Identity") + _three = Operation(operator="Conv2D-3") + _one = Operation(operator="Conv2D-1") + _reluconvbn = Operation(operator="ReLUConvBN") - _O = neps.space.neps_spaces.parameters.Categorical(choices=(_three, _one, _id)) + _O = Categorical(choices=(_three, _one, _id)) - _C_ARGS = neps.space.neps_spaces.parameters.Categorical( + _C_ARGS = Categorical( choices=( - (neps.space.neps_spaces.parameters.Resampled(_O),), + (Resampled(_O),), ( - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled("S"), + Resampled(_O), + Resampled("S"), _reluconvbn, ), ( - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled("S"), + Resampled(_O), + Resampled("S"), ), - (neps.space.neps_spaces.parameters.Resampled("S"),), + (Resampled("S"),), ), ) - _C = neps.space.neps_spaces.parameters.Operation( + _C = Operation( operator="Sequential", - args=neps.space.neps_spaces.parameters.Resampled(_C_ARGS), + args=Resampled(_C_ARGS), ) - _S_ARGS = neps.space.neps_spaces.parameters.Categorical( + _S_ARGS = Categorical( choices=( - (neps.space.neps_spaces.parameters.Resampled(_C),), + (Resampled(_C),), (_reluconvbn,), - (neps.space.neps_spaces.parameters.Resampled("S"),), + (Resampled("S"),), ( - neps.space.neps_spaces.parameters.Resampled("S"), - 
neps.space.neps_spaces.parameters.Resampled(_C), + Resampled("S"), + Resampled(_C), ), ( - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), ), ( - neps.space.neps_spaces.parameters.Resampled("S"), - neps.space.neps_spaces.parameters.Resampled("S"), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), - neps.space.neps_spaces.parameters.Resampled(_O), + Resampled("S"), + Resampled("S"), + Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), ), ), ) - S = neps.space.neps_spaces.parameters.Operation( + S = Operation( operator="Sequential", - args=neps.space.neps_spaces.parameters.Resampled(_S_ARGS), + args=Resampled(_S_ARGS), ) @@ -164,10 +164,9 @@ def test_resolve(): pipeline = GrammarLike() try: - resolved_pipeline, resolution_context = neps_space.resolve(pipeline) + resolved_pipeline, _ = neps_space.resolve(pipeline) except RecursionError: pytest.xfail("XFAIL due to too much recursion.") - raise s = resolved_pipeline.S s_config_string = neps_space.convert_operation_to_string(s) @@ -181,10 +180,9 @@ def test_resolve_alt(): pipeline = GrammarLikeAlt() try: - resolved_pipeline, resolution_context = neps_space.resolve(pipeline) + resolved_pipeline, _ = neps_space.resolve(pipeline) except RecursionError: pytest.xfail("XFAIL due to too much recursion.") - raise s = resolved_pipeline.S s_config_string = neps_space.convert_operation_to_string(s) diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index a59f9e3b4..87da79178 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ 
b/tests/test_neps_space/test_search_space__hnas_like.py @@ -2,12 +2,18 @@ import pytest -import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.space.neps_spaces import config_string, neps_space +from neps.space.neps_spaces.parameters import ( + Categorical, + Float, + Operation, + Pipeline, + Resampled, +) -class HNASLikePipeline(neps.space.neps_spaces.parameters.Pipeline): +class HNASLikePipeline(Pipeline): """Based on the `hierarchical+shared` variant (cell block is shared everywhere). Across _CONVBLOCK items, _ACT and _CONV also shared. Only the _NORM changes. @@ -19,193 +25,179 @@ class HNASLikePipeline(neps.space.neps_spaces.parameters.Pipeline): # Adding `PReLU` with a float hyperparameter `init` # Note that the sampled `_prelu_init_value` will be shared across all `_PRELU` uses, # since no `Resampled` was requested for it - _prelu_init_value = neps.space.neps_spaces.parameters.Float( - min_value=0.1, max_value=0.9 - ) - _PRELU = neps.space.neps_spaces.parameters.Operation( + _prelu_init_value = Float(min_value=0.1, max_value=0.9) + _PRELU = Operation( operator="ACT prelu", kwargs={"init": _prelu_init_value}, ) # ------------------------------------------------------ # Added `_PRELU` to the possible `_ACT` choices - _ACT = neps.space.neps_spaces.parameters.Categorical( + _ACT = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation(operator="ACT relu"), - neps.space.neps_spaces.parameters.Operation(operator="ACT hardswish"), - neps.space.neps_spaces.parameters.Operation(operator="ACT mish"), + Operation(operator="ACT relu"), + Operation(operator="ACT hardswish"), + Operation(operator="ACT mish"), _PRELU, ), ) - _CONV = neps.space.neps_spaces.parameters.Categorical( + _CONV = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation(operator="CONV conv1x1"), - neps.space.neps_spaces.parameters.Operation(operator="CONV conv3x3"), - neps.space.neps_spaces.parameters.Operation(operator="CONV dconv3x3"), 
+ Operation(operator="CONV conv1x1"), + Operation(operator="CONV conv3x3"), + Operation(operator="CONV dconv3x3"), ), ) - _NORM = neps.space.neps_spaces.parameters.Categorical( + _NORM = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation(operator="NORM batch"), - neps.space.neps_spaces.parameters.Operation(operator="NORM instance"), - neps.space.neps_spaces.parameters.Operation(operator="NORM layer"), + Operation(operator="NORM batch"), + Operation(operator="NORM instance"), + Operation(operator="NORM layer"), ), ) - _CONVBLOCK = neps.space.neps_spaces.parameters.Operation( + _CONVBLOCK = Operation( operator="CONVBLOCK Sequential3", args=( _ACT, _CONV, - neps.space.neps_spaces.parameters.Resampled(_NORM), + Resampled(_NORM), ), ) - _CONVBLOCK_FULL = neps.space.neps_spaces.parameters.Operation( + _CONVBLOCK_FULL = Operation( operator="OPS Sequential1", - args=(neps.space.neps_spaces.parameters.Resampled(_CONVBLOCK),), + args=(Resampled(_CONVBLOCK),), ) - _OP = neps.space.neps_spaces.parameters.Categorical( + _OP = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation(operator="OPS zero"), - neps.space.neps_spaces.parameters.Operation(operator="OPS id"), - neps.space.neps_spaces.parameters.Operation(operator="OPS avg_pool"), - neps.space.neps_spaces.parameters.Resampled(_CONVBLOCK_FULL), + Operation(operator="OPS zero"), + Operation(operator="OPS id"), + Operation(operator="OPS avg_pool"), + Resampled(_CONVBLOCK_FULL), ), ) - CL = neps.space.neps_spaces.parameters.Operation( + CL = Operation( operator="CELL Cell", args=( - neps.space.neps_spaces.parameters.Resampled(_OP), - neps.space.neps_spaces.parameters.Resampled(_OP), - neps.space.neps_spaces.parameters.Resampled(_OP), - neps.space.neps_spaces.parameters.Resampled(_OP), - neps.space.neps_spaces.parameters.Resampled(_OP), - neps.space.neps_spaces.parameters.Resampled(_OP), + Resampled(_OP), + Resampled(_OP), + Resampled(_OP), + Resampled(_OP), + Resampled(_OP), + Resampled(_OP), 
), ) - _C = neps.space.neps_spaces.parameters.Categorical( + _C = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation( - operator="C Sequential2", args=(CL, CL) - ), - neps.space.neps_spaces.parameters.Operation( - operator="C Sequential3", args=(CL, CL, CL) - ), - neps.space.neps_spaces.parameters.Operation( - operator="C Residual2", args=(CL, CL, CL) - ), + Operation(operator="C Sequential2", args=(CL, CL)), + Operation(operator="C Sequential3", args=(CL, CL, CL)), + Operation(operator="C Residual2", args=(CL, CL, CL)), ), ) - _RESBLOCK = neps.space.neps_spaces.parameters.Operation(operator="resBlock") - _DOWN = neps.space.neps_spaces.parameters.Categorical( + _RESBLOCK = Operation(operator="resBlock") + _DOWN = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation( - operator="DOWN Sequential2", args=(CL, _RESBLOCK) - ), - neps.space.neps_spaces.parameters.Operation( - operator="DOWN Sequential3", args=(CL, CL, _RESBLOCK) - ), - neps.space.neps_spaces.parameters.Operation( - operator="DOWN Residual2", args=(CL, _RESBLOCK, _RESBLOCK) - ), + Operation(operator="DOWN Sequential2", args=(CL, _RESBLOCK)), + Operation(operator="DOWN Sequential3", args=(CL, CL, _RESBLOCK)), + Operation(operator="DOWN Residual2", args=(CL, _RESBLOCK, _RESBLOCK)), ), ) - _D0 = neps.space.neps_spaces.parameters.Categorical( + _D0 = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation( + Operation( operator="D0 Sequential3", args=( - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_C), + Resampled(_C), + Resampled(_C), CL, ), ), - neps.space.neps_spaces.parameters.Operation( + Operation( operator="D0 Sequential4", args=( - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_C), + Resampled(_C), + Resampled(_C), + Resampled(_C), CL, ), ), - neps.space.neps_spaces.parameters.Operation( + Operation( 
operator="D0 Residual3", args=( - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_C), + Resampled(_C), + Resampled(_C), CL, CL, ), ), ), ) - _D1 = neps.space.neps_spaces.parameters.Categorical( + _D1 = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation( + Operation( operator="D1 Sequential3", args=( - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_DOWN), + Resampled(_C), + Resampled(_C), + Resampled(_DOWN), ), ), - neps.space.neps_spaces.parameters.Operation( + Operation( operator="D1 Sequential4", args=( - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_DOWN), + Resampled(_C), + Resampled(_C), + Resampled(_C), + Resampled(_DOWN), ), ), - neps.space.neps_spaces.parameters.Operation( + Operation( operator="D1 Residual3", args=( - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_C), - neps.space.neps_spaces.parameters.Resampled(_DOWN), - neps.space.neps_spaces.parameters.Resampled(_DOWN), + Resampled(_C), + Resampled(_C), + Resampled(_DOWN), + Resampled(_DOWN), ), ), ), ) - _D2 = neps.space.neps_spaces.parameters.Categorical( + _D2 = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation( + Operation( operator="D2 Sequential3", args=( - neps.space.neps_spaces.parameters.Resampled(_D1), - neps.space.neps_spaces.parameters.Resampled(_D1), - neps.space.neps_spaces.parameters.Resampled(_D0), + Resampled(_D1), + Resampled(_D1), + Resampled(_D0), ), ), - neps.space.neps_spaces.parameters.Operation( + Operation( operator="D2 Sequential3", args=( - neps.space.neps_spaces.parameters.Resampled(_D0), - neps.space.neps_spaces.parameters.Resampled(_D1), - neps.space.neps_spaces.parameters.Resampled(_D1), + 
Resampled(_D0), + Resampled(_D1), + Resampled(_D1), ), ), - neps.space.neps_spaces.parameters.Operation( + Operation( operator="D2 Sequential4", args=( - neps.space.neps_spaces.parameters.Resampled(_D1), - neps.space.neps_spaces.parameters.Resampled(_D1), - neps.space.neps_spaces.parameters.Resampled(_D0), - neps.space.neps_spaces.parameters.Resampled(_D0), + Resampled(_D1), + Resampled(_D1), + Resampled(_D0), + Resampled(_D0), ), ), ), ) - ARCH: neps.space.neps_spaces.parameters.Operation = _D2 + ARCH: Operation = _D2 @pytest.mark.repeat(500) @@ -222,7 +214,7 @@ def test_hnas_like(): def test_hnas_like_string(): pipeline = HNASLikePipeline() - resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) + resolved_pipeline, _ = neps_space.resolve(pipeline) arch = resolved_pipeline.ARCH arch_config_string = neps_space.convert_operation_to_string(arch) diff --git a/tests/test_neps_space/test_search_space__nos_like.py b/tests/test_neps_space/test_search_space__nos_like.py index 8321802ce..7574c89f7 100644 --- a/tests/test_neps_space/test_search_space__nos_like.py +++ b/tests/test_neps_space/test_search_space__nos_like.py @@ -2,111 +2,117 @@ import pytest -import neps.space.neps_spaces.parameters from neps.space.neps_spaces import config_string, neps_space +from neps.space.neps_spaces.parameters import ( + Categorical, + Integer, + Operation, + Pipeline, + Resampled, +) -class NosBench(neps.space.neps_spaces.parameters.Pipeline): - _UNARY_FUN = neps.space.neps_spaces.parameters.Categorical( +class NosBench(Pipeline): + _UNARY_FUN = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation(operator="Square"), - neps.space.neps_spaces.parameters.Operation(operator="Exp"), - neps.space.neps_spaces.parameters.Operation(operator="Log"), + Operation(operator="Square"), + Operation(operator="Exp"), + Operation(operator="Log"), ) ) - _BINARY_FUN = neps.space.neps_spaces.parameters.Categorical( + _BINARY_FUN = Categorical( choices=( - 
neps.space.neps_spaces.parameters.Operation(operator="Add"), - neps.space.neps_spaces.parameters.Operation(operator="Sub"), - neps.space.neps_spaces.parameters.Operation(operator="Mul"), + Operation(operator="Add"), + Operation(operator="Sub"), + Operation(operator="Mul"), ) ) - _TERNARY_FUN = neps.space.neps_spaces.parameters.Categorical( + _TERNARY_FUN = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation(operator="Interpolate"), - neps.space.neps_spaces.parameters.Operation(operator="Bias_Correct"), + Operation(operator="Interpolate"), + Operation(operator="Bias_Correct"), ) ) - _PARAMS = neps.space.neps_spaces.parameters.Categorical( + _PARAMS = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation(operator="Params"), - neps.space.neps_spaces.parameters.Operation(operator="Gradient"), - neps.space.neps_spaces.parameters.Operation(operator="Opt_Step"), + Operation(operator="Params"), + Operation(operator="Gradient"), + Operation(operator="Opt_Step"), ) ) - _CONST = neps.space.neps_spaces.parameters.Integer(3, 8) - _VAR = neps.space.neps_spaces.parameters.Integer(9, 19) + _CONST = Integer(3, 8) + _VAR = Integer(9, 19) - _POINTER = neps.space.neps_spaces.parameters.Categorical( + _POINTER = Categorical( choices=( - neps.space.neps_spaces.parameters.Resampled(_PARAMS), - neps.space.neps_spaces.parameters.Resampled(_CONST), - neps.space.neps_spaces.parameters.Resampled(_VAR), + Resampled(_PARAMS), + Resampled(_CONST), + Resampled(_VAR), ), ) - _UNARY = neps.space.neps_spaces.parameters.Operation( + _UNARY = Operation( operator="Unary", args=( - neps.space.neps_spaces.parameters.Resampled(_UNARY_FUN), - neps.space.neps_spaces.parameters.Resampled(_POINTER), + Resampled(_UNARY_FUN), + Resampled(_POINTER), ), ) - _BINARY = neps.space.neps_spaces.parameters.Operation( + _BINARY = Operation( operator="Binary", args=( - neps.space.neps_spaces.parameters.Resampled(_BINARY_FUN), - neps.space.neps_spaces.parameters.Resampled(_POINTER), - 
neps.space.neps_spaces.parameters.Resampled(_POINTER), + Resampled(_BINARY_FUN), + Resampled(_POINTER), + Resampled(_POINTER), ), ) - _TERNARY = neps.space.neps_spaces.parameters.Operation( + _TERNARY = Operation( operator="Ternary", args=( - neps.space.neps_spaces.parameters.Resampled(_TERNARY_FUN), - neps.space.neps_spaces.parameters.Resampled(_POINTER), - neps.space.neps_spaces.parameters.Resampled(_POINTER), - neps.space.neps_spaces.parameters.Resampled(_POINTER), + Resampled(_TERNARY_FUN), + Resampled(_POINTER), + Resampled(_POINTER), + Resampled(_POINTER), ), ) - _F_ARGS = neps.space.neps_spaces.parameters.Categorical( + _F_ARGS = Categorical( choices=( - neps.space.neps_spaces.parameters.Resampled(_UNARY), - neps.space.neps_spaces.parameters.Resampled(_BINARY), - neps.space.neps_spaces.parameters.Resampled(_TERNARY), + Resampled(_UNARY), + Resampled(_BINARY), + Resampled(_TERNARY), ), ) - _F = neps.space.neps_spaces.parameters.Operation( + _F = Operation( operator="Function", - args=(neps.space.neps_spaces.parameters.Resampled(_F_ARGS),), - kwargs={"var": neps.space.neps_spaces.parameters.Resampled(_VAR)}, + args=(Resampled(_F_ARGS),), + kwargs={"var": Resampled(_VAR)}, ) - _L_ARGS = neps.space.neps_spaces.parameters.Categorical( + _L_ARGS = Categorical( choices=( - (neps.space.neps_spaces.parameters.Resampled(_F),), + (Resampled(_F),), ( - neps.space.neps_spaces.parameters.Resampled(_F), - neps.space.neps_spaces.parameters.Resampled("_L"), + Resampled(_F), + Resampled("_L"), ), ), ) - _L = neps.space.neps_spaces.parameters.Operation( + _L = Operation( operator="Line_operator", - args=neps.space.neps_spaces.parameters.Resampled(_L_ARGS), + args=Resampled(_L_ARGS), ) - P = neps.space.neps_spaces.parameters.Operation( + P = Operation( operator="Program", - args=(neps.space.neps_spaces.parameters.Resampled(_L),), + args=(Resampled(_L),), ) diff --git a/tests/test_neps_space/test_search_space__recursion.py b/tests/test_neps_space/test_search_space__recursion.py 
index 3ab7977be..70242ed8b 100644 --- a/tests/test_neps_space/test_search_space__recursion.py +++ b/tests/test_neps_space/test_search_space__recursion.py @@ -2,8 +2,14 @@ from collections.abc import Callable, Sequence -import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces.parameters import ( + Categorical, + Float, + Operation, + Pipeline, + Resampled, +) class Model: @@ -31,12 +37,12 @@ def __call__(self, values: Sequence[float]) -> float: return sum(values) -class DemoRecursiveOperationSpace(neps.space.neps_spaces.parameters.Pipeline): +class DemoRecursiveOperationSpace(Pipeline): # The way to sample `factor` values - _factor = neps.space.neps_spaces.parameters.Float(min_value=0, max_value=1) + _factor = Float(min_value=0, max_value=1) # Sum - _sum = neps.space.neps_spaces.parameters.Operation(operator=Sum) + _sum = Operation(operator=Sum) # Model # Can recursively request itself as an arg. @@ -47,12 +53,12 @@ class DemoRecursiveOperationSpace(neps.space.neps_spaces.parameters.Pipeline): # ... 
# If we want the `factor` values to be different, # we just request a resample for them - _inner_function = neps.space.neps_spaces.parameters.Categorical( - choices=(_sum, neps.space.neps_spaces.parameters.Resampled("model")), + _inner_function = Categorical( + choices=(_sum, Resampled("model")), ) - model = neps.space.neps_spaces.parameters.Operation( + model = Operation( operator=Model, - args=(neps.space.neps_spaces.parameters.Resampled(_inner_function),), + args=(Resampled(_inner_function),), kwargs={"factor": _factor}, ) diff --git a/tests/test_neps_space/test_search_space__resampled.py b/tests/test_neps_space/test_search_space__resampled.py index b6106f46c..a11d34286 100644 --- a/tests/test_neps_space/test_search_space__resampled.py +++ b/tests/test_neps_space/test_search_space__resampled.py @@ -2,135 +2,137 @@ import pytest -import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space - - -class ActPipelineSimpleFloat(neps.space.neps_spaces.parameters.Pipeline): - prelu_init_value = neps.space.neps_spaces.parameters.Float( +from neps.space.neps_spaces.parameters import ( + Categorical, + ConfidenceLevel, + Float, + Integer, + Operation, + Pipeline, + Resampled, +) + + +class ActPipelineSimpleFloat(Pipeline): + prelu_init_value = Float( min_value=0, max_value=1000000, log=False, prior=0.25, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, + prior_confidence=ConfidenceLevel.LOW, ) - prelu_shared1 = neps.space.neps_spaces.parameters.Operation( + prelu_shared1 = Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_shared2 = neps.space.neps_spaces.parameters.Operation( + prelu_shared2 = Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_own_clone1 = neps.space.neps_spaces.parameters.Operation( + prelu_own_clone1 = Operation( operator="prelu", - kwargs={"init": neps.space.neps_spaces.parameters.Resampled(prelu_init_value)}, + kwargs={"init": Resampled(prelu_init_value)}, 
) - prelu_own_clone2 = neps.space.neps_spaces.parameters.Operation( + prelu_own_clone2 = Operation( operator="prelu", - kwargs={"init": neps.space.neps_spaces.parameters.Resampled(prelu_init_value)}, + kwargs={"init": Resampled(prelu_init_value)}, ) - _prelu_init_resampled = neps.space.neps_spaces.parameters.Resampled(prelu_init_value) - prelu_common_clone1 = neps.space.neps_spaces.parameters.Operation( + _prelu_init_resampled = Resampled(prelu_init_value) + prelu_common_clone1 = Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) - prelu_common_clone2 = neps.space.neps_spaces.parameters.Operation( + prelu_common_clone2 = Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) -class ActPipelineComplexInteger(neps.space.neps_spaces.parameters.Pipeline): - prelu_init_value = neps.space.neps_spaces.parameters.Integer( - min_value=0, max_value=1000000 - ) +class ActPipelineComplexInteger(Pipeline): + prelu_init_value = Integer(min_value=0, max_value=1000000) - prelu_shared1 = neps.space.neps_spaces.parameters.Operation( + prelu_shared1 = Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_shared2 = neps.space.neps_spaces.parameters.Operation( + prelu_shared2 = Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - prelu_own_clone1 = neps.space.neps_spaces.parameters.Operation( + prelu_own_clone1 = Operation( operator="prelu", - kwargs={"init": neps.space.neps_spaces.parameters.Resampled(prelu_init_value)}, + kwargs={"init": Resampled(prelu_init_value)}, ) - prelu_own_clone2 = neps.space.neps_spaces.parameters.Operation( + prelu_own_clone2 = Operation( operator="prelu", - kwargs={"init": neps.space.neps_spaces.parameters.Resampled(prelu_init_value)}, + kwargs={"init": Resampled(prelu_init_value)}, ) - _prelu_init_resampled = neps.space.neps_spaces.parameters.Resampled(prelu_init_value) - prelu_common_clone1 = neps.space.neps_spaces.parameters.Operation( + _prelu_init_resampled = 
Resampled(prelu_init_value) + prelu_common_clone1 = Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) - prelu_common_clone2 = neps.space.neps_spaces.parameters.Operation( + prelu_common_clone2 = Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, ) - act: neps.space.neps_spaces.parameters.Operation = ( - neps.space.neps_spaces.parameters.Operation( - operator="sequential6", - args=( - prelu_shared1, - prelu_shared2, - prelu_own_clone1, - prelu_own_clone2, - prelu_common_clone1, - prelu_common_clone2, - ), - kwargs={ - "prelu_shared": prelu_shared1, - "prelu_own_clone": prelu_own_clone1, - "prelu_common_clone": prelu_common_clone1, - "resampled_hp_value": neps.space.neps_spaces.parameters.Resampled( - prelu_init_value - ), - }, - ) + act: Operation = Operation( + operator="sequential6", + args=( + prelu_shared1, + prelu_shared2, + prelu_own_clone1, + prelu_own_clone2, + prelu_common_clone1, + prelu_common_clone2, + ), + kwargs={ + "prelu_shared": prelu_shared1, + "prelu_own_clone": prelu_own_clone1, + "prelu_common_clone": prelu_common_clone1, + "resampled_hp_value": Resampled(prelu_init_value), + }, ) -class CellPipelineCategorical(neps.space.neps_spaces.parameters.Pipeline): - conv_block = neps.space.neps_spaces.parameters.Categorical( +class CellPipelineCategorical(Pipeline): + conv_block = Categorical( choices=( - neps.space.neps_spaces.parameters.Operation(operator="conv1"), - neps.space.neps_spaces.parameters.Operation(operator="conv2"), + Operation(operator="conv1"), + Operation(operator="conv2"), ), ) - op1 = neps.space.neps_spaces.parameters.Categorical( + op1 = Categorical( choices=( conv_block, - neps.space.neps_spaces.parameters.Operation("op1"), + Operation("op1"), ), ) - op2 = neps.space.neps_spaces.parameters.Categorical( + op2 = Categorical( choices=( - neps.space.neps_spaces.parameters.Resampled(conv_block), - neps.space.neps_spaces.parameters.Operation("op2"), + Resampled(conv_block), + Operation("op2"), ), ) - 
_resampled_op1 = neps.space.neps_spaces.parameters.Resampled(op1) - cell = neps.space.neps_spaces.parameters.Operation( + _resampled_op1 = Resampled(op1) + cell = Operation( operator="cell", args=( op1, op2, _resampled_op1, - neps.space.neps_spaces.parameters.Resampled(op2), + Resampled(op2), _resampled_op1, - neps.space.neps_spaces.parameters.Resampled(op2), + Resampled(op2), ), ) @@ -268,8 +270,8 @@ def test_resampled_categorical(): assert op1 is not pipeline.op1 assert op2 is not pipeline.op2 - assert isinstance(op1, neps.space.neps_spaces.parameters.Operation) - assert isinstance(op2, neps.space.neps_spaces.parameters.Operation) + assert isinstance(op1, Operation) + assert isinstance(op2, Operation) assert (op1 is conv_block) or (op1.operator == "op1") assert op2.operator in ("conv1", "conv2", "op2") diff --git a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index 6e2d5e71a..70993256e 100644 --- a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -2,43 +2,44 @@ import pytest -import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces.parameters import ( + Categorical, + ConfidenceLevel, + Float, + Integer, + Operation, + Pipeline, +) -class ActPipelineSimple(neps.space.neps_spaces.parameters.Pipeline): - prelu = neps.space.neps_spaces.parameters.Operation( +class ActPipelineSimple(Pipeline): + prelu = Operation( operator="prelu", kwargs={"init": 0.1}, ) - relu = neps.space.neps_spaces.parameters.Operation(operator="relu") + relu = Operation(operator="relu") - act: neps.space.neps_spaces.parameters.Operation = ( - neps.space.neps_spaces.parameters.Categorical( - choices=(prelu, relu), - ) + act: Operation = Categorical( + choices=(prelu, relu), ) -class 
ActPipelineComplex(neps.space.neps_spaces.parameters.Pipeline): - prelu_init_value: float = neps.space.neps_spaces.parameters.Float( - min_value=0.1, max_value=0.9 - ) - prelu = neps.space.neps_spaces.parameters.Operation( +class ActPipelineComplex(Pipeline): + prelu_init_value: float = Float(min_value=0.1, max_value=0.9) + prelu = Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) - act: neps.space.neps_spaces.parameters.Operation = ( - neps.space.neps_spaces.parameters.Categorical( - choices=(prelu,), - ) + act: Operation = Categorical( + choices=(prelu,), ) -class FixedPipeline(neps.space.neps_spaces.parameters.Pipeline): +class FixedPipeline(Pipeline): prelu_init_value: float = 0.5 - prelu = neps.space.neps_spaces.parameters.Operation( + prelu = Operation( operator="prelu", kwargs={"init": prelu_init_value}, ) @@ -48,80 +49,74 @@ class FixedPipeline(neps.space.neps_spaces.parameters.Pipeline): _conv_choices_low = ("conv1x1", "conv3x3") _conv_choices_high = ("conv5x5", "conv9x9") _conv_choices_prior_confidence_choices = ( - neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, - neps.space.neps_spaces.parameters.ConfidenceLevel.MEDIUM, - neps.space.neps_spaces.parameters.ConfidenceLevel.HIGH, + ConfidenceLevel.LOW, + ConfidenceLevel.MEDIUM, + ConfidenceLevel.HIGH, ) -class ConvPipeline(neps.space.neps_spaces.parameters.Pipeline): - conv_choices_prior_index: int = neps.space.neps_spaces.parameters.Integer( +class ConvPipeline(Pipeline): + conv_choices_prior_index: int = Integer( min_value=0, max_value=1, log=False, prior=0, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, + prior_confidence=ConfidenceLevel.LOW, ) - conv_choices_prior_confidence: neps.space.neps_spaces.parameters.ConfidenceLevel = ( - neps.space.neps_spaces.parameters.Categorical( - choices=_conv_choices_prior_confidence_choices, - prior_index=1, - prior_confidence=neps.space.neps_spaces.parameters.ConfidenceLevel.LOW, - ) + conv_choices_prior_confidence: 
ConfidenceLevel = Categorical( + choices=_conv_choices_prior_confidence_choices, + prior_index=1, + prior_confidence=ConfidenceLevel.LOW, ) - conv_choices: tuple[str, ...] = neps.space.neps_spaces.parameters.Categorical( + conv_choices: tuple[str, ...] = Categorical( choices=(_conv_choices_low, _conv_choices_high), prior_index=conv_choices_prior_index, prior_confidence=conv_choices_prior_confidence, ) - _conv1: str = neps.space.neps_spaces.parameters.Categorical( + _conv1: str = Categorical( choices=conv_choices, ) - _conv2: str = neps.space.neps_spaces.parameters.Categorical( + _conv2: str = Categorical( choices=conv_choices, ) - conv_block: neps.space.neps_spaces.parameters.Operation = ( - neps.space.neps_spaces.parameters.Categorical( - choices=( - neps.space.neps_spaces.parameters.Operation( - operator="sequential3", - args=[_conv1, _conv2, _conv1], - ), + conv_block: Operation = Categorical( + choices=( + Operation( + operator="sequential3", + args=[_conv1, _conv2, _conv1], ), - ) + ), ) -class CellPipeline(neps.space.neps_spaces.parameters.Pipeline): - _act = neps.space.neps_spaces.parameters.Operation(operator="relu") - _conv = neps.space.neps_spaces.parameters.Operation(operator="conv3x3") - _norm = neps.space.neps_spaces.parameters.Operation(operator="batch") +class CellPipeline(Pipeline): + _act = Operation(operator="relu") + _conv = Operation(operator="conv3x3") + _norm = Operation(operator="batch") - conv_block = neps.space.neps_spaces.parameters.Operation( - operator="sequential3", args=(_act, _conv, _norm) - ) + conv_block = Operation(operator="sequential3", args=(_act, _conv, _norm)) - op1 = neps.space.neps_spaces.parameters.Categorical( + op1 = Categorical( choices=( conv_block, - neps.space.neps_spaces.parameters.Operation(operator="zero"), - neps.space.neps_spaces.parameters.Operation(operator="avg_pool"), + Operation(operator="zero"), + Operation(operator="avg_pool"), ), ) - op2 = neps.space.neps_spaces.parameters.Categorical( + op2 = 
Categorical( choices=( conv_block, - neps.space.neps_spaces.parameters.Operation(operator="zero"), - neps.space.neps_spaces.parameters.Operation(operator="avg_pool"), + Operation(operator="zero"), + Operation(operator="avg_pool"), ), ) _some_int = 2 - _some_float = neps.space.neps_spaces.parameters.Float(min_value=0.5, max_value=0.5) + _some_float = Float(min_value=0.5, max_value=0.5) - cell = neps.space.neps_spaces.parameters.Operation( + cell = Operation( operator="cell", args=(op1, op2, op1, op2, op1, op2), kwargs={"float_hp": _some_float, "int_hp": _some_int}, @@ -189,7 +184,7 @@ def test_nested_complex(): def test_nested_complex_string(): pipeline = ActPipelineComplex() - resolved_pipeline, sampled_values = neps_space.resolve(pipeline) + resolved_pipeline, _ = neps_space.resolve(pipeline) act = resolved_pipeline.act act_config_string = neps_space.convert_operation_to_string(act) @@ -309,8 +304,8 @@ def test_shared_complex(): op2 = resolved_pipeline.op2 assert op1 is not pipeline.op1 assert op2 is not pipeline.op2 - assert isinstance(op1, neps.space.neps_spaces.parameters.Operation) - assert isinstance(op2, neps.space.neps_spaces.parameters.Operation) + assert isinstance(op1, Operation) + assert isinstance(op2, Operation) if op1 is op2: assert op1 is conv_block From 3b58495833e213efc2a8b8d7afed0b57d5243be6 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 3 Jul 2025 13:30:56 +0200 Subject: [PATCH 013/156] Refactor NEPS space integration tests to remove redundant evaluation pipeline adjustments --- neps/api.py | 11 +++++++++- .../test_neps_space/test_neps_integration.py | 21 ++++--------------- ...st_neps_integration_priorband__max_cost.py | 6 +----- ...t_neps_integration_priorband__max_evals.py | 6 +----- .../test_search_space__fidelity.py | 2 +- 5 files changed, 17 insertions(+), 29 deletions(-) diff --git a/neps/api.py b/neps/api.py index 396dca0d6..aa4a069eb 100644 --- a/neps/api.py +++ b/neps/api.py @@ -10,6 +10,8 @@ from neps.optimizers import 
AskFunction, OptimizerChoice, load_optimizer from neps.runtime import _launch_runtime +from neps.space.neps_spaces.neps_space import adjust_evaluation_pipeline_for_neps_space +from neps.space.neps_spaces.parameters import Pipeline from neps.space.parsing import convert_to_space from neps.status.status import post_run_csv from neps.utils.common import dynamic_load_object @@ -19,7 +21,6 @@ from neps.optimizers.algorithms import CustomOptimizer from neps.space import Parameter, SearchSpace - from neps.space.neps_spaces.parameters import Pipeline from neps.state import EvaluatePipelineReturn logger = logging.getLogger(__name__) @@ -31,6 +32,7 @@ def run( # noqa: PLR0913 Mapping[str, dict | str | int | float | Parameter] | SearchSpace | ConfigurationSpace + | Pipeline ), *, root_directory: str | Path = "neps_results", @@ -412,6 +414,13 @@ def __call__( ) logger.info(f"Starting neps.run using root directory {root_directory}") + + if isinstance(pipeline_space, Pipeline): + assert not isinstance(evaluate_pipeline, str) + evaluate_pipeline = adjust_evaluation_pipeline_for_neps_space( + evaluate_pipeline, pipeline_space + ) + space = convert_to_space(pipeline_space) _optimizer_ask, _optimizer_info = load_optimizer(optimizer=optimizer, space=space) diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index 3e7ed5cff..d122dbbcd 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -6,7 +6,6 @@ import neps import neps.space.neps_spaces.optimizers.algorithms -from neps.space.neps_spaces import neps_space from neps.space.neps_spaces.parameters import ( Categorical, ConfidenceLevel, @@ -164,10 +163,7 @@ def test_hyperparameter_demo(optimizer): root_directory = f"results/hyperparameter_demo__{optimizer.__name__}" neps.run( - evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space( - hyperparameter_pipeline_to_optimize, - pipeline_space, - ), + 
evaluate_pipeline=hyperparameter_pipeline_to_optimize,
         pipeline_space=pipeline_space,
         optimizer=optimizer,
         root_directory=root_directory,
@@ -190,10 +186,7 @@ def test_hyperparameter_with_fidelity_demo(optimizer):
     root_directory = f"results/hyperparameter_with_fidelity_demo__{optimizer.__name__}"
 
     neps.run(
-        evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space(
-            hyperparameter_pipeline_to_optimize,
-            pipeline_space,
-        ),
+        evaluate_pipeline=hyperparameter_pipeline_to_optimize,
         pipeline_space=pipeline_space,
         optimizer=optimizer,
         root_directory=root_directory,
@@ -216,10 +209,7 @@ def test_hyperparameter_complex_demo(optimizer):
     root_directory = f"results/hyperparameter_complex_demo__{optimizer.__name__}"
 
     neps.run(
-        evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space(
-            hyperparameter_pipeline_to_optimize,
-            pipeline_space,
-        ),
+        evaluate_pipeline=hyperparameter_pipeline_to_optimize,
         pipeline_space=pipeline_space,
         optimizer=optimizer,
         root_directory=root_directory,
@@ -344,10 +334,7 @@ def test_operation_demo(optimizer):
     root_directory = f"results/operation_demo__{optimizer.__name__}"
 
     neps.run(
-        evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space(
-            operation_pipeline_to_optimize,
-            pipeline_space,
-        ),
+        evaluate_pipeline=operation_pipeline_to_optimize,
         pipeline_space=pipeline_space,
         optimizer=optimizer,
         root_directory=root_directory,
diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py
index fb7f17592..0f6c8ca76 100644
--- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py
+++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py
@@ -9,7 +9,6 @@
 import neps.optimizers.algorithms as old_algorithms
 import neps.space.neps_spaces.optimizers.algorithms
 import neps.space.neps_spaces.optimizers.bracket_optimizer as new_bracket_optimizer
-from neps.space.neps_spaces import 
neps_space from neps.space.neps_spaces.parameters import ( ConfidenceLevel, Fidelity, @@ -130,10 +129,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): _COSTS.clear() neps.run( - evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space( - evaluate_pipeline, - pipeline_space, - ), + evaluate_pipeline=evaluate_pipeline, pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index 08a8ae9b4..cb606f7f3 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -9,7 +9,6 @@ import neps.optimizers.algorithms as old_algorithms import neps.space.neps_spaces.optimizers.algorithms import neps.space.neps_spaces.optimizers.bracket_optimizer as new_bracket_optimizer -from neps.space.neps_spaces import neps_space from neps.space.neps_spaces.parameters import ( ConfidenceLevel, Fidelity, @@ -114,10 +113,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): root_directory = f"results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" neps.run( - evaluate_pipeline=neps_space.adjust_evaluation_pipeline_for_neps_space( - evaluate_pipeline, - pipeline_space, - ), + evaluate_pipeline=evaluate_pipeline, pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, diff --git a/tests/test_neps_space/test_search_space__fidelity.py b/tests/test_neps_space/test_search_space__fidelity.py index 5b15de704..8a76a2f73 100644 --- a/tests/test_neps_space/test_search_space__fidelity.py +++ b/tests/test_neps_space/test_search_space__fidelity.py @@ -83,7 +83,7 @@ def test_fidelity_resolution_works(): # Resolve a pipeline which contains a fidelity, # with a valid value for it in the environment. 
- resolved_pipeline, resolution_context = neps_space.resolve( + resolved_pipeline, _ = neps_space.resolve( pipeline=pipeline, environment_values={"fidelity_integer1": 10}, ) From 9bc6ca8ee455780520e97fd65f2ac89304d21f40 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 4 Jul 2025 19:41:47 +0200 Subject: [PATCH 014/156] Add Pytorch Neural Network example --- .../neps_spaces/pytorch_nn_example.ipynb | 379 ++++++++++++++++++ 1 file changed, 379 insertions(+) create mode 100644 neps_examples/neps_spaces/pytorch_nn_example.ipynb diff --git a/neps_examples/neps_spaces/pytorch_nn_example.ipynb b/neps_examples/neps_spaces/pytorch_nn_example.ipynb new file mode 100644 index 000000000..c938188f8 --- /dev/null +++ b/neps_examples/neps_spaces/pytorch_nn_example.ipynb @@ -0,0 +1,379 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 23, + "id": "e61a523f", + "metadata": {}, + "outputs": [], + "source": [ + "from neps.space.neps_spaces.parameters import Pipeline, Operation, Categorical, Resampled\n", + "\n", + "import torch\n", + "import torch.nn as nn" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "f3ca063f", + "metadata": {}, + "outputs": [], + "source": [ + "# Define the neural network architecture using PyTorch as usual\n", + "\n", + "class ReLUConvBN(nn.Module):\n", + " def __init__(self, out_channels, kernel_size, stride, padding):\n", + " super().__init__()\n", + "\n", + " self.kernel_size = kernel_size\n", + " self.op = nn.Sequential(\n", + " nn.ReLU(inplace=False),\n", + " nn.LazyConv2d(\n", + " out_channels=out_channels,\n", + " kernel_size=kernel_size,\n", + " stride=stride,\n", + " padding=padding,\n", + " dilation=2,\n", + " bias=False,\n", + " ),\n", + " nn.LazyBatchNorm2d(affine=True, track_running_stats=True),\n", + " )\n", + "\n", + " def forward(self, x):\n", + " return self.op(x)\n", + "\n", + "\n", + "class Identity(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + "\n", + " def forward(self, 
x):\n", + " return x\n", + " \n" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "4bda71ce", + "metadata": {}, + "outputs": [], + "source": [ + "class NN_Space(Pipeline):\n", + " _id = Operation(operator=Identity)\n", + " _three = Operation(operator=nn.Conv2d,kwargs={\"in_channels\":3, \"out_channels\":3, \"kernel_size\":3, \"stride\":1, \"padding\":1})\n", + " _one = Operation(operator=nn.Conv2d,kwargs={\"in_channels\":3, \"out_channels\":3, \"kernel_size\":1, \"stride\":1, \"padding\":0})\n", + " _reluconvbn = Operation(operator=ReLUConvBN, kwargs={\"out_channels\":3, \"kernel_size\":3, \"stride\":1, \"padding\":1})\n", + "\n", + " _O = Categorical(choices=(_three, _one, _id))\n", + "\n", + " _C_ARGS = Categorical(\n", + " choices=(\n", + " (Resampled(_O),),\n", + " (Resampled(_O), Resampled(\"model\"), _reluconvbn),\n", + " (Resampled(_O), Resampled(\"model\")),\n", + " (Resampled(\"model\"),),\n", + " ),\n", + " )\n", + " _C = Operation(\n", + " operator=nn.Sequential,\n", + " args=Resampled(_C_ARGS),\n", + " )\n", + "\n", + " _model_ARGS = Categorical(\n", + " choices=(\n", + " (Resampled(_C),),\n", + " (_reluconvbn,),\n", + " (Resampled(\"model\"),),\n", + " (Resampled(\"model\"), Resampled(_C)),\n", + " (Resampled(_O), Resampled(_O), Resampled(_O)),\n", + " (\n", + " Resampled(\"model\"),\n", + " Resampled(\"model\"),\n", + " Resampled(_O),\n", + " Resampled(_O),\n", + " Resampled(_O),\n", + " Resampled(_O),\n", + " Resampled(_O),\n", + " Resampled(_O),\n", + " ),\n", + " ),\n", + " )\n", + " model = Operation(\n", + " operator=nn.Sequential,\n", + " args=Resampled(_model_ARGS),\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "17005669", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Callable:\n", + "\n", + "Sequential(\n", + " (0): Identity()\n", + " (1): Conv2d(3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (2): Conv2d(3, 3, 
kernel_size=(1, 1), stride=(1, 1))\n", + ")\n", + "\n", + "\n", + "Config string:\n", + "\n", + "( () () ())\n", + "\t01 :: \n", + "\t\t02 :: \n", + "\t\t02 :: \n", + "\t\t02 :: \n" + ] + } + ], + "source": [ + "# Sampling and printing one random configuration of the pipeline\n", + "\n", + "from neps.space.neps_spaces import neps_space\n", + "\n", + "pipeline = NN_Space()\n", + "resolved_pipeline, resolution_context = neps_space.resolve(pipeline)\n", + "\n", + "s = resolved_pipeline.model\n", + "s_config_string = neps_space.convert_operation_to_string(s)\n", + "pretty_config = neps_space.config_string.ConfigString(s_config_string).pretty_format()\n", + "s_callable = neps_space.convert_operation_to_callable(s)\n", + "\n", + "print(\"Callable:\\n\")\n", + "print(s_callable)\n", + "\n", + "print(\"\\n\\nConfig string:\\n\")\n", + "print(pretty_config)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "9efeb556", + "metadata": {}, + "outputs": [], + "source": [ + "# Defining the pipeline, using the model from the NN_space space as callable\n", + "\n", + "import numpy as np\n", + "\n", + "def evaluate_pipeline(model: nn.Sequential):\n", + " x = torch.ones(size=[1, 3, 220, 220])\n", + " result = np.sum(model(x).detach().numpy().flatten())\n", + " return result" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "fa9cabbf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Configs: 5\n", + "\n", + " success: 5\n", + "\n", + "# Best Found (config 4):\n", + "\n", + " objective_to_minimize: -0.00634765625\n", + " config: {'SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1}\n", + " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spaces\\results\\neps_spaces_nn_example\\configs\\config_4\n" + ] + }, + { + "data": { + "text/plain": [ + "( config.SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 \\\n", + " id \n", + " 1 1 \n", + " 2 4 \n", + " 3 0 \n", + " 4 2 \n", + " 5 2 \n", + " \n", + " 
config.SAMPLING__Resolvable.model.args[0].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 \n", + " 2 1 \n", + " 3 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[1].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 \n", + " 2 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 \n", + " 2 0 \n", + " 3 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__4 \\\n", + " id \n", + " 1 \n", + " 2 \n", + " 3 2 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 \n", + " 2 \n", + " 3 2 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " id \n", + " 1 \n", + " 2 \n", + " 3 3 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " id \n", + " 1 \n", + " 2 \n", + " 3 1 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \\\n", + " id \n", + " 1 \n", + " 2 \n", + " 3 0 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 \n", + " 2 \n", + " 3 2 \n", + " 4 \n", + " 5 \n", + " \n", + " ... reported_as evaluation_duration \\\n", + " id ... \n", + " 1 ... success 0.003479 \n", + " 2 ... 
success 0.001969 \n", + " 3 ... success 0.003328 \n", + " 4 ... success 0.077663 \n", + " 5 ... success 0.001381 \n", + " \n", + " location state \\\n", + " id \n", + " 1 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", + " 2 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", + " 3 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", + " 4 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", + " 5 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", + " \n", + " sampling_worker_id time_sampled \\\n", + " id \n", + " 1 3916-2025-07-04T17:41:16.732387+00:00 1751650876.741291 \n", + " 2 3916-2025-07-04T17:41:16.732387+00:00 1751650876.776602 \n", + " 3 3916-2025-07-04T17:41:16.732387+00:00 1751650876.805409 \n", + " 4 3916-2025-07-04T17:41:16.732387+00:00 1751650876.844271 \n", + " 5 3916-2025-07-04T17:41:16.732387+00:00 1751650876.948936 \n", + " \n", + " evaluating_worker_id evaluation_duration \\\n", + " id \n", + " 1 3916-2025-07-04T17:41:16.732387+00:00 0.003479 \n", + " 2 3916-2025-07-04T17:41:16.732387+00:00 0.001969 \n", + " 3 3916-2025-07-04T17:41:16.732387+00:00 0.003328 \n", + " 4 3916-2025-07-04T17:41:16.732387+00:00 0.077663 \n", + " 5 3916-2025-07-04T17:41:16.732387+00:00 0.001381 \n", + " \n", + " time_started time_end \n", + " id \n", + " 1 1751650876.742245 1751650876.752408 \n", + " 2 1751650876.777807 1751650876.784942 \n", + " 3 1751650876.806628 1751650876.813765 \n", + " 4 1751650876.845342 1751650876.928955 \n", + " 5 1751650876.949865 1751650876.954657 \n", + " \n", + " [5 rows x 251 columns],\n", + " num_success 5.0\n", + " best_objective_to_minimize -0.006348\n", + " best_config_id 4\n", + " SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 2\n", + " SAMPLING__Resolvable.model.args[0].resampled_categorical::categorical__3 \n", + " ... 
\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 2\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 2\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 1\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " Length: 242, dtype: object)" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from neps.space.neps_spaces.optimizers.algorithms import RandomSearch\n", + "import neps\n", + "\n", + "pipeline_space = NN_Space()\n", + "\n", + "neps.run(\n", + " evaluate_pipeline=evaluate_pipeline,\n", + " pipeline_space=pipeline_space,\n", + " optimizer=RandomSearch,\n", + " root_directory=\"results/neps_spaces_nn_example\",\n", + " post_run_summary=True,\n", + " max_evaluations_total=5,\n", + " overwrite_working_directory=True,\n", + ")\n", + "neps.status(\"results/neps_spaces_nn_example\", print_summary=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + 
"nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From eaceb8c8ff6dc0a6ff8cce5b26df7f72b8390935 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 4 Jul 2025 19:48:03 +0200 Subject: [PATCH 015/156] Change import order --- .../neps_spaces/pytorch_nn_example.ipynb | 274 ++++++++++-------- 1 file changed, 150 insertions(+), 124 deletions(-) diff --git a/neps_examples/neps_spaces/pytorch_nn_example.ipynb b/neps_examples/neps_spaces/pytorch_nn_example.ipynb index c938188f8..139c4f1db 100644 --- a/neps_examples/neps_spaces/pytorch_nn_example.ipynb +++ b/neps_examples/neps_spaces/pytorch_nn_example.ipynb @@ -2,26 +2,16 @@ "cells": [ { "cell_type": "code", - "execution_count": 23, - "id": "e61a523f", - "metadata": {}, - "outputs": [], - "source": [ - "from neps.space.neps_spaces.parameters import Pipeline, Operation, Categorical, Resampled\n", - "\n", - "import torch\n", - "import torch.nn as nn" - ] - }, - { - "cell_type": "code", - "execution_count": 24, + "execution_count": 45, "id": "f3ca063f", "metadata": {}, "outputs": [], "source": [ "# Define the neural network architecture using PyTorch as usual\n", "\n", + "import torch\n", + "import torch.nn as nn\n", + "\n", "class ReLUConvBN(nn.Module):\n", " def __init__(self, out_channels, kernel_size, stride, padding):\n", " super().__init__()\n", @@ -55,11 +45,15 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 46, "id": "4bda71ce", "metadata": {}, "outputs": [], "source": [ + "# Define the NEPS space for the neural network architecture\n", + "\n", + "from neps.space.neps_spaces.parameters import Pipeline, Operation, Categorical, Resampled\n", + "\n", "class NN_Space(Pipeline):\n", " _id = Operation(operator=Identity)\n", " _three = Operation(operator=nn.Conv2d,kwargs={\"in_channels\":3, \"out_channels\":3, \"kernel_size\":3, \"stride\":1, \"padding\":1})\n", @@ -108,7 +102,7 @@ }, { "cell_type": "code", - 
"execution_count": 26, + "execution_count": 47, "id": "17005669", "metadata": {}, "outputs": [ @@ -119,19 +113,21 @@ "Callable:\n", "\n", "Sequential(\n", - " (0): Identity()\n", - " (1): Conv2d(3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", - " (2): Conv2d(3, 3, kernel_size=(1, 1), stride=(1, 1))\n", + " (0): ReLUConvBN(\n", + " (op): Sequential(\n", + " (0): ReLU()\n", + " (1): LazyConv2d(0, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(2, 2), bias=False)\n", + " (2): LazyBatchNorm2d(0, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", ")\n", "\n", "\n", "Config string:\n", "\n", - "( () () ())\n", + "( ( {'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}))\n", "\t01 :: \n", - "\t\t02 :: \n", - "\t\t02 :: \n", - "\t\t02 :: \n" + "\t\t02 :: {'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}\n" ] } ], @@ -157,7 +153,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 48, "id": "9efeb556", "metadata": {}, "outputs": [], @@ -174,7 +170,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 49, "id": "fa9cabbf", "metadata": {}, "outputs": [ @@ -186,11 +182,11 @@ "\n", " success: 5\n", "\n", - "# Best Found (config 4):\n", + "# Best Found (config 3):\n", "\n", - " objective_to_minimize: -0.00634765625\n", - " config: {'SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 3, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 1, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1}\n", - " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spaces\\results\\neps_spaces_nn_example\\configs\\config_4\n" + " objective_to_minimize: -56006.5703125\n", + " config: {'SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3': 2}\n", + " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spaces\\results\\neps_spaces_nn_example\\configs\\config_3\n" ] }, { @@ -198,91 +194,91 @@ "text/plain": [ "( config.SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 \\\n", " id \n", - " 1 1 \n", - " 2 4 \n", - " 3 0 \n", - " 4 2 \n", - " 5 2 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 \n", - " 2 1 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[1].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 \n", - " 2 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 \n", - " 2 0 \n", - " 3 \n", - " 4 \n", - " 5 \n", + " 1 2 \n", + " 2 2 \n", + " 3 4 \n", + " 4 4 \n", + " 5 4 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__4 \\\n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", " id \n", - " 1 \n", - " 2 \n", - " 3 2 \n", + " 1 1 \n", + " 2 2 \n", + " 3 \n", " 4 \n", " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 \n", - " 2 \n", - " 3 2 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", " id \n", " 1 \n", - " 2 \n", - " 3 3 \n", + " 2 0 \n", + " 3 \n", " 4 \n", " 5 \n", " \n", - " 
config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \\\n", " id \n", " 1 \n", - " 2 \n", - " 3 1 \n", + " 2 3 \n", + " 3 \n", " 4 \n", " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \\\n", - " id \n", - " 1 \n", - " 2 \n", - " 3 0 \n", - " 4 \n", - " 5 \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " id \n", + " 1 \n", + " 2 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " id \n", + " 1 \n", + " 2 3 \n", + " 3 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " id \n", + " 1 \n", + " 2 0 \n", + " 3 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \\\n", + " id \n", + " 1 \n", + " 2 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", + " \n", + " 
config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 \n", + " 2 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 \n", - " 2 \n", - " 3 2 \n", - " 4 \n", - " 5 \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " id \n", + " 1 \n", + " 2 5 \n", + " 3 \n", + " 4 \n", + " 5 \n", " \n", " ... reported_as evaluation_duration \\\n", " id ... \n", - " 1 ... success 0.003479 \n", - " 2 ... success 0.001969 \n", - " 3 ... success 0.003328 \n", - " 4 ... success 0.077663 \n", - " 5 ... success 0.001381 \n", + " 1 ... success 0.004197 \n", + " 2 ... success 0.009361 \n", + " 3 ... success 0.001245 \n", + " 4 ... success 0.001364 \n", + " 5 ... 
success 0.002229 \n", " \n", " location state \\\n", " id \n", @@ -294,44 +290,74 @@ " \n", " sampling_worker_id time_sampled \\\n", " id \n", - " 1 3916-2025-07-04T17:41:16.732387+00:00 1751650876.741291 \n", - " 2 3916-2025-07-04T17:41:16.732387+00:00 1751650876.776602 \n", - " 3 3916-2025-07-04T17:41:16.732387+00:00 1751650876.805409 \n", - " 4 3916-2025-07-04T17:41:16.732387+00:00 1751650876.844271 \n", - " 5 3916-2025-07-04T17:41:16.732387+00:00 1751650876.948936 \n", + " 1 3916-2025-07-04T17:47:50.522247+00:00 1751651270.530558 \n", + " 2 3916-2025-07-04T17:47:50.522247+00:00 1751651270.559515 \n", + " 3 3916-2025-07-04T17:47:50.522247+00:00 1751651270.589489 \n", + " 4 3916-2025-07-04T17:47:50.522247+00:00 1751651270.61193 \n", + " 5 3916-2025-07-04T17:47:50.522247+00:00 1751651270.633809 \n", " \n", " evaluating_worker_id evaluation_duration \\\n", " id \n", - " 1 3916-2025-07-04T17:41:16.732387+00:00 0.003479 \n", - " 2 3916-2025-07-04T17:41:16.732387+00:00 0.001969 \n", - " 3 3916-2025-07-04T17:41:16.732387+00:00 0.003328 \n", - " 4 3916-2025-07-04T17:41:16.732387+00:00 0.077663 \n", - " 5 3916-2025-07-04T17:41:16.732387+00:00 0.001381 \n", + " 1 3916-2025-07-04T17:47:50.522247+00:00 0.004197 \n", + " 2 3916-2025-07-04T17:47:50.522247+00:00 0.009361 \n", + " 3 3916-2025-07-04T17:47:50.522247+00:00 0.001245 \n", + " 4 3916-2025-07-04T17:47:50.522247+00:00 0.001364 \n", + " 5 3916-2025-07-04T17:47:50.522247+00:00 0.002229 \n", " \n", " time_started time_end \n", " id \n", - " 1 1751650876.742245 1751650876.752408 \n", - " 2 1751650876.777807 1751650876.784942 \n", - " 3 1751650876.806628 1751650876.813765 \n", - " 4 1751650876.845342 1751650876.928955 \n", - " 5 1751650876.949865 1751650876.954657 \n", + " 1 1751651270.531482 1751651270.539361 \n", + " 2 1751651270.560551 1751651270.573804 \n", + " 3 1751651270.590477 1751651270.595073 \n", + " 4 1751651270.612845 1751651270.617602 \n", + " 5 1751651270.634704 1751651270.640658 \n", " \n", - " [5 rows x 251 
columns],\n", - " num_success 5.0\n", - " best_objective_to_minimize -0.006348\n", - " best_config_id 4\n", - " SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 2\n", - " SAMPLING__Resolvable.model.args[0].resampled_categorical::categorical__3 \n", - " ... \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 2\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 2\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 1\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " Length: 242, dtype: object)" + " [5 rows x 50 columns],\n", + " num_success 5.0\n", + " best_objective_to_minimize -56006.570312\n", + " best_config_id 3\n", + " SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 4\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", + " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3 \n", + " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_categorical::categorical__3 2\n", + " SAMPLING__Resolvable.model.args[1].resampled_categorical::categorical__3 1\n", + " SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3 2\n", + " dtype: object)" ] }, - "execution_count": 28, + "execution_count": 49, "metadata": {}, "output_type": "execute_result" } From 1c854b2591b27ba6f2d5a6ea9c5806ecef1611e4 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sat, 5 Jul 2025 13:05:30 +0200 Subject: [PATCH 016/156] Refactor optimizer imports in integration tests - Updated imports for optimizers in test_neps_integration.py to use the new neps.optimizers module. - Replaced references to old RandomSearch and ComplexRandomSearch algorithms with their new counterparts from neps_algorithms. - Adjusted the optimizer imports in test_neps_integration_priorband__max_cost.py and test_neps_integration_priorband__max_evals.py to align with the new structure. - Ensured all instances of priorband optimizers are updated to use the new neps_algorithms module. 
--- neps/__init__.py | 7 + neps/optimizers/algorithms.py | 1 + neps/optimizers/neps_algorithms.py | 184 ++++++++++++ .../neps_bracket_optimizer.py} | 169 +---------- .../neps_priorband.py} | 2 +- .../neps_random_search.py} | 20 +- neps/space/neps_spaces/optimizers/__init__.py | 0 .../neps_spaces/pytorch_nn_example.ipynb | 278 +++++++++--------- .../test_neps_space/test_neps_integration.py | 21 +- ...st_neps_integration_priorband__max_cost.py | 15 +- ...t_neps_integration_priorband__max_evals.py | 15 +- 11 files changed, 387 insertions(+), 325 deletions(-) create mode 100644 neps/optimizers/neps_algorithms.py rename neps/{space/neps_spaces/optimizers/bracket_optimizer.py => optimizers/neps_bracket_optimizer.py} (54%) rename neps/{space/neps_spaces/optimizers/priorband.py => optimizers/neps_priorband.py} (99%) rename neps/{space/neps_spaces/optimizers/algorithms.py => optimizers/neps_random_search.py} (95%) delete mode 100644 neps/space/neps_spaces/optimizers/__init__.py diff --git a/neps/__init__.py b/neps/__init__.py index 756217609..3554291c9 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -1,3 +1,10 @@ +"""NePS: A framework for Neural Architecture Search and Hyperparameter Optimization. +This module provides a unified interface for defining search spaces, running optimizers, +and visualizing results. It includes various optimizers, search space definitions, +and plotting utilities, making it easy to experiment with different configurations +and algorithms. 
+""" + from neps.api import run from neps.optimizers import algorithms from neps.optimizers.ask_and_tell import AskAndTell diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 8f24618a8..216d604a8 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -24,6 +24,7 @@ from pathlib import Path from typing import TYPE_CHECKING, Any, Concatenate, Literal, TypeAlias +import pandas as pd import torch from neps.optimizers.ask_and_tell import AskAndTell # noqa: F401 diff --git a/neps/optimizers/neps_algorithms.py b/neps/optimizers/neps_algorithms.py new file mode 100644 index 000000000..43b56b36d --- /dev/null +++ b/neps/optimizers/neps_algorithms.py @@ -0,0 +1,184 @@ +from __future__ import annotations + +from collections.abc import Callable, Sequence +from functools import partial +from typing import TYPE_CHECKING, Literal + +import pandas as pd + +from neps.optimizers.neps_bracket_optimizer import _NePSBracketOptimizer +from neps.optimizers.neps_priorband import NePSPriorBandSampler +from neps.optimizers.neps_random_search import NePSComplexRandomSearch, NePSRandomSearch + +if TYPE_CHECKING: + import pandas as pd + + from neps.optimizers.utils.brackets import Bracket + from neps.space.neps_spaces.parameters import Pipeline + + +def _neps_bracket_optimizer( + pipeline_space: Pipeline, + *, + bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], + eta: int, + sampler: Literal["priorband"], + sample_prior_first: bool | Literal["highest_fidelity"], + early_stopping_rate: int | None, +) -> _NePSBracketOptimizer: + fidelity_attrs = pipeline_space.fidelity_attrs + + if len(fidelity_attrs) != 1: + raise ValueError( + "Only one fidelity should be defined in the pipeline space." 
+ f"\nGot: {fidelity_attrs!r}" + ) + + fidelity_name, fidelity_obj = next(iter(fidelity_attrs.items())) + + if sample_prior_first not in (True, False, "highest_fidelity"): + raise ValueError( + "sample_prior_first should be either True, False or 'highest_fidelity'" + ) + + from neps.optimizers.utils import brackets + + # Determine the strategy for creating brackets for sampling + create_brackets: Callable[[pd.DataFrame], Sequence[Bracket] | Bracket] + match bracket_type: + case "successive_halving": + assert early_stopping_rate is not None + rung_to_fidelity, rung_sizes = brackets.calculate_sh_rungs( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + early_stopping_rate=early_stopping_rate, + ) + create_brackets = partial( + brackets.Sync.create_repeating, + rung_sizes=rung_sizes, + ) + + case "hyperband": + assert early_stopping_rate is None + rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + ) + create_brackets = partial( + brackets.Hyperband.create_repeating, + bracket_layouts=bracket_layouts, + ) + + case "asha": + assert early_stopping_rate is not None + rung_to_fidelity, _rung_sizes = brackets.calculate_sh_rungs( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + early_stopping_rate=early_stopping_rate, + ) + create_brackets = partial( + brackets.Async.create, + rungs=list(rung_to_fidelity), + eta=eta, + ) + + case "async_hb": + assert early_stopping_rate is None + rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + ) + # We don't care about the capacity of each bracket, we need the rung layout + bracket_rungs = [list(bracket.keys()) for bracket in bracket_layouts] + create_brackets = partial( + brackets.AsyncHyperband.create, + bracket_rungs=bracket_rungs, + eta=eta, + ) + case _: + raise ValueError(f"Unknown bracket type: 
{bracket_type}") + + _sampler: NePSPriorBandSampler + match sampler: + case "priorband": + _sampler = NePSPriorBandSampler( + space=pipeline_space, + eta=eta, + early_stopping_rate=( + early_stopping_rate if early_stopping_rate is not None else 0 + ), + fid_bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + ) + case _: + raise ValueError(f"Unknown sampler: {sampler}") + + return _NePSBracketOptimizer( + space=pipeline_space, + eta=eta, + rung_to_fid=rung_to_fidelity, + sampler=_sampler, + sample_prior_first=sample_prior_first, + create_brackets=create_brackets, + ) + + +def neps_priorband( + space: Pipeline, + *, + eta: int = 3, + sample_prior_first: bool | Literal["highest_fidelity"] = False, + base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", +) -> _NePSBracketOptimizer: + """Create a PriorBand optimizer for the given pipeline space. + + Args: + space: The pipeline space to optimize over. + eta: The eta parameter for the algorithm. + sample_prior_first: Whether to sample the prior first. + If set to `"highest_fidelity"`, the prior will be sampled at the + highest fidelity, otherwise at the lowest fidelity. + base: The type of bracket optimizer to use. One of: + - "successive_halving" + - "hyperband" + - "asha" + - "async_hb" + Returns: + An instance of _BracketOptimizer configured for PriorBand sampling. + """ + return _neps_bracket_optimizer( + pipeline_space=space, + bracket_type=base, + eta=eta, + sampler="priorband", + sample_prior_first=sample_prior_first, + early_stopping_rate=0 if base in ("successive_halving", "asha") else None, + ) + + +def neps_random_search( + pipeline: Pipeline, +) -> NePSRandomSearch: + """A simple random search algorithm that samples configurations uniformly at random. + + Args: + pipeline: The search space to sample from. 
+ """ + + return NePSRandomSearch( + pipeline=pipeline, + ) + + +def neps_complex_random_search( + pipeline: Pipeline, +) -> NePSComplexRandomSearch: + """A complex random search algorithm that samples configurations uniformly at random, + but allows for more complex sampling strategies. + + Args: + pipeline: The search space to sample from. + """ + + return NePSComplexRandomSearch( + pipeline=pipeline, + ) diff --git a/neps/space/neps_spaces/optimizers/bracket_optimizer.py b/neps/optimizers/neps_bracket_optimizer.py similarity index 54% rename from neps/space/neps_spaces/optimizers/bracket_optimizer.py rename to neps/optimizers/neps_bracket_optimizer.py index 900b0247f..6ca237acf 100644 --- a/neps/space/neps_spaces/optimizers/bracket_optimizer.py +++ b/neps/optimizers/neps_bracket_optimizer.py @@ -10,21 +10,24 @@ import logging from collections.abc import Callable, Mapping, Sequence from dataclasses import dataclass -from functools import partial from typing import TYPE_CHECKING, Any, Literal import pandas as pd import neps.optimizers.bracket_optimizer as standard_bracket_optimizer -import neps.space.neps_spaces.parameters -import neps.space.neps_spaces.sampling from neps.optimizers.optimizer import SampledConfig from neps.optimizers.utils.brackets import PromoteAction, SampleAction from neps.space.neps_spaces import neps_space -from neps.space.neps_spaces.optimizers.priorband import PriorBandSampler +from neps.space.neps_spaces.sampling import ( + OnlyPredefinedValuesSampler, + PriorOrFallbackSampler, + RandomSampler, +) if TYPE_CHECKING: + from neps.optimizers.neps_priorband import NePSPriorBandSampler from neps.optimizers.utils.brackets import Bracket + from neps.space.neps_spaces.parameters import Pipeline from neps.state.optimizer import BudgetInfo from neps.state.trial import Trial @@ -33,10 +36,10 @@ @dataclass -class _BracketOptimizer: +class _NePSBracketOptimizer: """The pipeline space to optimize over.""" - space: 
neps.space.neps_spaces.parameters.Pipeline + space: Pipeline """Whether or not to sample the prior first. @@ -55,12 +58,12 @@ class _BracketOptimizer: create_brackets: Callable[[pd.DataFrame], Sequence[Bracket] | Bracket] """The sampler used to generate new trials.""" - sampler: PriorBandSampler + sampler: NePSPriorBandSampler def __call__( # noqa: C901 self, trials: Mapping[str, Trial], - budget_info: BudgetInfo | None, # noqa: ARG002 + budget_info: BudgetInfo | None, n: int | None = None, ) -> SampledConfig | list[SampledConfig]: assert n is None, "TODO" @@ -145,13 +148,9 @@ def _sample_prior( fidelity_level: Literal["min"] | Literal["max"], ) -> dict[str, Any]: # TODO: [lum] have a CenterSampler as fallback, not Random - _try_always_priors_sampler = ( - neps.space.neps_spaces.sampling.PriorOrFallbackSampler( - fallback_sampler=neps.space.neps_spaces.sampling.RandomSampler( - predefined_samplings={} - ), - prior_use_probability=1, - ) + _try_always_priors_sampler = PriorOrFallbackSampler( + fallback_sampler=RandomSampler(predefined_samplings={}), + prior_use_probability=1, ) _environment_values = {} @@ -188,7 +187,7 @@ def _convert_to_another_rung( _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, - domain_sampler=neps.space.neps_spaces.sampling.OnlyPredefinedValuesSampler( + domain_sampler=OnlyPredefinedValuesSampler( predefined_samplings=data.predefined_samplings, ), environment_values=_environment_values, @@ -196,141 +195,3 @@ def _convert_to_another_rung( config = neps_space.NepsCompatConverter.to_neps_config(resolution_context) return dict(**config) - - -def priorband( - space: neps.space.neps_spaces.parameters.Pipeline, - *, - eta: int = 3, - sample_prior_first: bool | Literal["highest_fidelity"] = False, - base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", -) -> _BracketOptimizer: - """Create a PriorBand optimizer for the given pipeline space. 
- - Args: - space: The pipeline space to optimize over. - eta: The eta parameter for the algorithm. - sample_prior_first: Whether to sample the prior first. - If set to `"highest_fidelity"`, the prior will be sampled at the - highest fidelity, otherwise at the lowest fidelity. - base: The type of bracket optimizer to use. One of: - - "successive_halving" - - "hyperband" - - "asha" - - "async_hb" - Returns: - An instance of _BracketOptimizer configured for PriorBand sampling. - """ - return _bracket_optimizer( - pipeline_space=space, - bracket_type=base, - eta=eta, - sampler="priorband", - sample_prior_first=sample_prior_first, - early_stopping_rate=0 if base in ("successive_halving", "asha") else None, - ) - - -def _bracket_optimizer( - pipeline_space: neps.space.neps_spaces.parameters.Pipeline, - *, - bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], - eta: int, - sampler: Literal["priorband"], - sample_prior_first: bool | Literal["highest_fidelity"], - early_stopping_rate: int | None, -) -> _BracketOptimizer: - fidelity_attrs = pipeline_space.fidelity_attrs - - if len(fidelity_attrs) != 1: - raise ValueError( - "Only one fidelity should be defined in the pipeline space." 
- f"\nGot: {fidelity_attrs!r}" - ) - - fidelity_name, fidelity_obj = next(iter(fidelity_attrs.items())) - - if sample_prior_first not in (True, False, "highest_fidelity"): - raise ValueError( - "sample_prior_first should be either True, False or 'highest_fidelity'" - ) - - from neps.optimizers.utils import brackets - - # Determine the strategy for creating brackets for sampling - create_brackets: Callable[[pd.DataFrame], Sequence[Bracket] | Bracket] - match bracket_type: - case "successive_halving": - assert early_stopping_rate is not None - rung_to_fidelity, rung_sizes = brackets.calculate_sh_rungs( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), - eta=eta, - early_stopping_rate=early_stopping_rate, - ) - create_brackets = partial( - brackets.Sync.create_repeating, - rung_sizes=rung_sizes, - ) - - case "hyperband": - assert early_stopping_rate is None - rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), - eta=eta, - ) - create_brackets = partial( - brackets.Hyperband.create_repeating, - bracket_layouts=bracket_layouts, - ) - - case "asha": - assert early_stopping_rate is not None - rung_to_fidelity, _rung_sizes = brackets.calculate_sh_rungs( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), - eta=eta, - early_stopping_rate=early_stopping_rate, - ) - create_brackets = partial( - brackets.Async.create, - rungs=list(rung_to_fidelity), - eta=eta, - ) - - case "async_hb": - assert early_stopping_rate is None - rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), - eta=eta, - ) - # We don't care about the capacity of each bracket, we need the rung layout - bracket_rungs = [list(bracket.keys()) for bracket in bracket_layouts] - create_brackets = partial( - brackets.AsyncHyperband.create, - bracket_rungs=bracket_rungs, - eta=eta, - ) - case _: - raise ValueError(f"Unknown bracket type: 
{bracket_type}") - - _sampler: PriorBandSampler - match sampler: - case "priorband": - _sampler = PriorBandSampler( - space=pipeline_space, - eta=eta, - early_stopping_rate=( - early_stopping_rate if early_stopping_rate is not None else 0 - ), - fid_bounds=(fidelity_obj.min_value, fidelity_obj.max_value), - ) - case _: - raise ValueError(f"Unknown sampler: {sampler}") - - return _BracketOptimizer( - space=pipeline_space, - eta=eta, - rung_to_fid=rung_to_fidelity, - sampler=_sampler, - sample_prior_first=sample_prior_first, - create_brackets=create_brackets, - ) diff --git a/neps/space/neps_spaces/optimizers/priorband.py b/neps/optimizers/neps_priorband.py similarity index 99% rename from neps/space/neps_spaces/optimizers/priorband.py rename to neps/optimizers/neps_priorband.py index aa2139f45..d369fb73a 100644 --- a/neps/space/neps_spaces/optimizers/priorband.py +++ b/neps/optimizers/neps_priorband.py @@ -23,7 +23,7 @@ @dataclass -class PriorBandSampler: +class NePSPriorBandSampler: """Implement a sampler based on PriorBand.""" """The pipeline space to optimize over.""" diff --git a/neps/space/neps_spaces/optimizers/algorithms.py b/neps/optimizers/neps_random_search.py similarity index 95% rename from neps/space/neps_spaces/optimizers/algorithms.py rename to neps/optimizers/neps_random_search.py index c3a0b04a8..ca22c58a0 100644 --- a/neps/space/neps_spaces/optimizers/algorithms.py +++ b/neps/optimizers/neps_random_search.py @@ -1,8 +1,5 @@ -"""Optimizers for NePS pipelines. -These optimizers implement various strategies for sampling configurations from a NePS -pipeline. They include simple random search, complex random search with mutation and -crossover, and more advanced sampling techniques that leverage prior knowledge and -successful trials. +"""This module implements a simple random search optimizer for a NePS pipeline. +It samples configurations randomly from the pipeline's domain and environment values. 
""" from __future__ import annotations @@ -12,10 +9,7 @@ from collections.abc import Mapping from typing import TYPE_CHECKING -from neps.space.neps_spaces.neps_space import ( - _prepare_sampled_configs, - resolve, -) +from neps.space.neps_spaces.neps_space import _prepare_sampled_configs, resolve from neps.space.neps_spaces.sampling import ( CrossoverByMixingSampler, CrossoverNotPossibleError, @@ -33,7 +27,7 @@ from neps.state.trial import Trial -class RandomSearch: +class NePSRandomSearch: """A simple random search optimizer for a NePS pipeline. It samples configurations randomly from the pipeline's domain and environment values. :param pipeline: The pipeline to optimize, which should be a Pipeline object. @@ -57,7 +51,7 @@ def __init__(self, pipeline: Pipeline): def __call__( self, trials: Mapping[str, trial_state.Trial], - budget_info: optimizer_state.BudgetInfo | None, # noqa: ARG002 + budget_info: optimizer_state.BudgetInfo | None, n: int | None = None, ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: """Sample configurations randomly from the pipeline's domain and environment @@ -88,7 +82,7 @@ def __call__( return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) -class ComplexRandomSearch: +class NePSComplexRandomSearch: """A complex random search optimizer for a NePS pipeline. It samples configurations randomly from the pipeline's domain and environment values, and also performs mutations and crossovers based on previous successful trials. 
@@ -123,7 +117,7 @@ def __init__(self, pipeline: Pipeline): def __call__( self, trials: Mapping[str, trial_state.Trial], - budget_info: optimizer_state.BudgetInfo | None, # noqa: ARG002 + budget_info: optimizer_state.BudgetInfo | None, n: int | None = None, ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: """Sample configurations randomly from the pipeline's domain and environment diff --git a/neps/space/neps_spaces/optimizers/__init__.py b/neps/space/neps_spaces/optimizers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neps_examples/neps_spaces/pytorch_nn_example.ipynb b/neps_examples/neps_spaces/pytorch_nn_example.ipynb index 139c4f1db..6b73edb37 100644 --- a/neps_examples/neps_spaces/pytorch_nn_example.ipynb +++ b/neps_examples/neps_spaces/pytorch_nn_example.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 45, + "execution_count": 1, "id": "f3ca063f", "metadata": {}, "outputs": [], @@ -45,7 +45,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 2, "id": "4bda71ce", "metadata": {}, "outputs": [], @@ -102,7 +102,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 3, "id": "17005669", "metadata": {}, "outputs": [ @@ -153,7 +153,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 6, "id": "9efeb556", "metadata": {}, "outputs": [], @@ -170,7 +170,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 7, "id": "fa9cabbf", "metadata": {}, "outputs": [ @@ -182,11 +182,11 @@ "\n", " success: 5\n", "\n", - "# Best Found (config 3):\n", + "# Best Found (config 1):\n", "\n", - " objective_to_minimize: -56006.5703125\n", - " config: {'SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3': 2}\n", - " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spaces\\results\\neps_spaces_nn_example\\configs\\config_3\n" + " objective_to_minimize: -23793.197265625\n", + " config: {'SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[7].resampled_categorical::categorical__3': 0}\n", + " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spaces\\results\\neps_spaces_nn_example\\configs\\config_1\n" ] }, { @@ -194,91 +194,91 @@ "text/plain": [ "( config.SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 \\\n", " id \n", - " 1 2 \n", - " 2 2 \n", - " 3 4 \n", - " 4 4 \n", - " 5 4 \n", + " 1 5 \n", + " 2 0 \n", + " 3 5 \n", + " 4 3 \n", + " 5 3 \n", " \n", " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", " id \n", - " 1 1 \n", - " 2 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", + " 1 3 \n", + " 2 \n", + " 3 1 \n", + " 4 1 \n", + " 5 1 \n", " \n", " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", " id \n", - " 1 \n", - " 2 0 \n", + " 1 3 \n", + " 2 \n", " 3 \n", " 4 \n", " 5 \n", " \n", - " 
config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \\\n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", " id \n", - " 1 \n", - " 2 3 \n", + " 1 4 \n", + " 2 \n", " 3 \n", " 4 \n", " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", - " id \n", - " 1 \n", - " 2 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 1 \n", + " 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", - " id \n", - " 1 \n", - " 2 3 \n", - " 3 \n", - " 4 \n", - " 5 \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 2 \n", + " 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", - " id \n", - " 1 \n", - " 2 0 \n", - " 3 \n", - " 4 \n", - " 5 \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3 \\\n", 
+ " id \n", + " 1 2 \n", + " 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \\\n", - " id \n", - " 1 \n", - " 2 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \\\n", + " id \n", + " 1 0 \n", + " 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 \n", - " 2 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 1 \n", + " 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \\\n", - " id \n", - " 1 \n", - " 2 5 \n", - " 3 \n", - " 4 \n", - " 5 \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \\\n", + " id \n", + " 1 1 \n", + " 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", " \n", " ... reported_as evaluation_duration \\\n", " id ... \n", - " 1 ... success 0.004197 \n", - " 2 ... 
success 0.009361 \n", - " 3 ... success 0.001245 \n", - " 4 ... success 0.001364 \n", - " 5 ... success 0.002229 \n", + " 1 ... success 0.009104 \n", + " 2 ... success 0.006249 \n", + " 3 ... success 0.005574 \n", + " 4 ... success 0.005795 \n", + " 5 ... success 0.00329 \n", " \n", " location state \\\n", " id \n", @@ -290,80 +290,96 @@ " \n", " sampling_worker_id time_sampled \\\n", " id \n", - " 1 3916-2025-07-04T17:47:50.522247+00:00 1751651270.530558 \n", - " 2 3916-2025-07-04T17:47:50.522247+00:00 1751651270.559515 \n", - " 3 3916-2025-07-04T17:47:50.522247+00:00 1751651270.589489 \n", - " 4 3916-2025-07-04T17:47:50.522247+00:00 1751651270.61193 \n", - " 5 3916-2025-07-04T17:47:50.522247+00:00 1751651270.633809 \n", + " 1 7648-2025-07-04T23:22:27.070755+00:00 1751671347.079726 \n", + " 2 7648-2025-07-04T23:22:27.070755+00:00 1751671347.110561 \n", + " 3 7648-2025-07-04T23:22:27.070755+00:00 1751671347.136625 \n", + " 4 7648-2025-07-04T23:22:27.070755+00:00 1751671347.163217 \n", + " 5 7648-2025-07-04T23:22:27.070755+00:00 1751671347.188885 \n", " \n", " evaluating_worker_id evaluation_duration \\\n", " id \n", - " 1 3916-2025-07-04T17:47:50.522247+00:00 0.004197 \n", - " 2 3916-2025-07-04T17:47:50.522247+00:00 0.009361 \n", - " 3 3916-2025-07-04T17:47:50.522247+00:00 0.001245 \n", - " 4 3916-2025-07-04T17:47:50.522247+00:00 0.001364 \n", - " 5 3916-2025-07-04T17:47:50.522247+00:00 0.002229 \n", + " 1 7648-2025-07-04T23:22:27.070755+00:00 0.009104 \n", + " 2 7648-2025-07-04T23:22:27.070755+00:00 0.006249 \n", + " 3 7648-2025-07-04T23:22:27.070755+00:00 0.005574 \n", + " 4 7648-2025-07-04T23:22:27.070755+00:00 0.005795 \n", + " 5 7648-2025-07-04T23:22:27.070755+00:00 0.00329 \n", " \n", " time_started time_end \n", " id \n", - " 1 1751651270.531482 1751651270.539361 \n", - " 2 1751651270.560551 1751651270.573804 \n", - " 3 1751651270.590477 1751651270.595073 \n", - " 4 1751651270.612845 1751651270.617602 \n", - " 5 1751651270.634704 1751651270.640658 \n", + " 1 
1751671347.080621 1751671347.093368 \n", + " 2 1751671347.11144 1751671347.121197 \n", + " 3 1751671347.137631 1751671347.146662 \n", + " 4 1751671347.164088 1751671347.173416 \n", + " 5 1751671347.189788 1751671347.196588 \n", " \n", - " [5 rows x 50 columns],\n", - " num_success 5.0\n", - " best_objective_to_minimize -56006.570312\n", - " best_config_id 3\n", - " SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 4\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", - " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3 \n", - " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", - " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3 \n", - " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_categorical::categorical__3 2\n", - " 
SAMPLING__Resolvable.model.args[1].resampled_categorical::categorical__3 1\n", - " SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3 2\n", + " [5 rows x 66 columns],\n", + " num_success 5.0\n", + " best_objective_to_minimize -23793.197266\n", + " best_config_id 1\n", + " SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 5\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6 3\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 3\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 4\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 1\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3 2\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3 2\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 0\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 1\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 1\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 2\n", + " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 4\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 2\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3 1\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 0\n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args.resampled_categorical::categorical__6 1\n", + " SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3 2\n", + " SAMPLING__Resolvable.model.args[3].resampled_categorical::categorical__3 1\n", + " SAMPLING__Resolvable.model.args[4].resampled_categorical::categorical__3 2\n", + " SAMPLING__Resolvable.model.args[5].resampled_categorical::categorical__3 0\n", + " SAMPLING__Resolvable.model.args[6].resampled_categorical::categorical__3 0\n", + " SAMPLING__Resolvable.model.args[7].resampled_categorical::categorical__3 0\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " 
SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 \n", " dtype: object)" ] }, - "execution_count": 49, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from neps.space.neps_spaces.optimizers.algorithms import RandomSearch\n", + "from neps.optimizers.neps_algorithms import neps_random_search\n", "import neps\n", "\n", "pipeline_space = NN_Space()\n", @@ -371,7 +387,7 @@ "neps.run(\n", " evaluate_pipeline=evaluate_pipeline,\n", " pipeline_space=pipeline_space,\n", - " optimizer=RandomSearch,\n", + " optimizer=neps_random_search,\n", " root_directory=\"results/neps_spaces_nn_example\",\n", " post_run_summary=True,\n", " max_evaluations_total=5,\n", diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index d122dbbcd..d59f852f4 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -5,7 +5,8 @@ import pytest import neps -import neps.space.neps_spaces.optimizers.algorithms +import neps.optimizers +from neps.optimizers import neps_algorithms from neps.space.neps_spaces.parameters import ( Categorical, ConfidenceLevel, @@ -154,8 +155,8 @@ class DemoHyperparameterComplexSpace(Pipeline): @pytest.mark.parametrize( 
"optimizer", [ - neps.space.neps_spaces.optimizers.algorithms.RandomSearch, - neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, + neps_algorithms.neps_random_search, + neps_algorithms.neps_complex_random_search, ], ) def test_hyperparameter_demo(optimizer): @@ -177,8 +178,8 @@ def test_hyperparameter_demo(optimizer): @pytest.mark.parametrize( "optimizer", [ - neps.space.neps_spaces.optimizers.algorithms.RandomSearch, - neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, + neps_algorithms.neps_random_search, + neps_algorithms.neps_complex_random_search, ], ) def test_hyperparameter_with_fidelity_demo(optimizer): @@ -200,8 +201,8 @@ def test_hyperparameter_with_fidelity_demo(optimizer): @pytest.mark.parametrize( "optimizer", [ - neps.space.neps_spaces.optimizers.algorithms.RandomSearch, - neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, + neps_algorithms.neps_random_search, + neps_algorithms.neps_complex_random_search, ], ) def test_hyperparameter_complex_demo(optimizer): @@ -325,8 +326,8 @@ class DemoOperationSpace(Pipeline): @pytest.mark.parametrize( "optimizer", [ - neps.space.neps_spaces.optimizers.algorithms.RandomSearch, - neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, + neps_algorithms.neps_random_search, + neps_algorithms.neps_complex_random_search, ], ) def test_operation_demo(optimizer): @@ -334,7 +335,7 @@ def test_operation_demo(optimizer): root_directory = f"results/operation_demo__{optimizer.__name__}" neps.run( - evaluate_pipeline=hyperparameter_pipeline_to_optimize, + evaluate_pipeline=operation_pipeline_to_optimize, pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index 0f6c8ca76..b0c533855 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ 
b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -7,8 +7,7 @@ import neps import neps.optimizers.algorithms as old_algorithms -import neps.space.neps_spaces.optimizers.algorithms -import neps.space.neps_spaces.optimizers.bracket_optimizer as new_bracket_optimizer +from neps.optimizers import neps_algorithms from neps.space.neps_spaces.parameters import ( ConfidenceLevel, Fidelity, @@ -95,27 +94,27 @@ class DemoHyperparameterWithFidelitySpace(Pipeline): ("optimizer", "optimizer_name"), [ ( - neps.space.neps_spaces.optimizers.algorithms.RandomSearch, + neps_algorithms.neps_random_search, "new__RandomSearch", ), ( - neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, + neps_algorithms.neps_complex_random_search, "new__ComplexRandomSearch", ), ( - partial(new_bracket_optimizer.priorband, base="successive_halving"), + partial(neps_algorithms.neps_priorband, base="successive_halving"), "new__priorband+successive_halving", ), ( - partial(new_bracket_optimizer.priorband, base="asha"), + partial(neps_algorithms.neps_priorband, base="asha"), "new__priorband+asha", ), ( - partial(new_bracket_optimizer.priorband, base="async_hb"), + partial(neps_algorithms.neps_priorband, base="async_hb"), "new__priorband+async_hb", ), ( - new_bracket_optimizer.priorband, + neps_algorithms.neps_priorband, "new__priorband+hyperband", ), ], diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index cb606f7f3..00cacc49e 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -7,8 +7,7 @@ import neps import neps.optimizers.algorithms as old_algorithms -import neps.space.neps_spaces.optimizers.algorithms -import neps.space.neps_spaces.optimizers.bracket_optimizer as new_bracket_optimizer +from neps.optimizers import neps_algorithms from 
neps.space.neps_spaces.parameters import ( ConfidenceLevel, Fidelity, @@ -82,27 +81,27 @@ class DemoHyperparameterWithFidelitySpace(Pipeline): ("optimizer", "optimizer_name"), [ ( - neps.space.neps_spaces.optimizers.algorithms.RandomSearch, + neps_algorithms.neps_random_search, "new__RandomSearch", ), ( - neps.space.neps_spaces.optimizers.algorithms.ComplexRandomSearch, + neps_algorithms.neps_complex_random_search, "new__ComplexRandomSearch", ), ( - partial(new_bracket_optimizer.priorband, base="successive_halving"), + partial(neps_algorithms.neps_priorband, base="successive_halving"), "new__priorband+successive_halving", ), ( - partial(new_bracket_optimizer.priorband, base="asha"), + partial(neps_algorithms.neps_priorband, base="asha"), "new__priorband+asha", ), ( - partial(new_bracket_optimizer.priorband, base="async_hb"), + partial(neps_algorithms.neps_priorband, base="async_hb"), "new__priorband+async_hb", ), ( - new_bracket_optimizer.priorband, + neps_algorithms.neps_priorband, "new__priorband+hyperband", ), ], From 917939bc12d1ed45dddaa5e1103e3ec2c7b00deb Mon Sep 17 00:00:00 2001 From: Meganton Date: Sat, 5 Jul 2025 14:12:51 +0200 Subject: [PATCH 017/156] Refactor type hints and improve docstring formatting in optimizer modules --- neps/api.py | 4 +++- neps/optimizers/__init__.py | 15 +++++++++++++-- neps/optimizers/algorithms.py | 3 ++- neps/optimizers/ask_and_tell.py | 1 - neps/optimizers/neps_algorithms.py | 6 +++++- neps/optimizers/neps_priorband.py | 8 ++++---- neps/optimizers/neps_random_search.py | 3 +++ neps/space/parsing.py | 2 +- .../neps_spaces/pytorch_nn_example.ipynb | 2 +- 9 files changed, 32 insertions(+), 12 deletions(-) diff --git a/neps/api.py b/neps/api.py index aa4a069eb..6017ab950 100644 --- a/neps/api.py +++ b/neps/api.py @@ -50,7 +50,9 @@ def run( # noqa: PLR0913 OptimizerChoice | Mapping[str, Any] | tuple[OptimizerChoice, Mapping[str, Any]] - | Callable[Concatenate[SearchSpace | Pipeline, ...], AskFunction] + | 
Callable[Concatenate[SearchSpace, ...], AskFunction] # Hack, while we transit + | Callable[Concatenate[Pipeline, ...], AskFunction] # from SearchSpace to + | Callable[Concatenate[SearchSpace | Pipeline, ...], AskFunction] # Pipeline | CustomOptimizer | Literal["auto"] ) = "auto", diff --git a/neps/optimizers/__init__.py b/neps/optimizers/__init__.py index 6ad255346..34fb5f5bb 100644 --- a/neps/optimizers/__init__.py +++ b/neps/optimizers/__init__.py @@ -48,7 +48,9 @@ def load_optimizer( OptimizerChoice | Mapping[str, Any] | tuple[OptimizerChoice, Mapping[str, Any]] - | Callable[Concatenate[SearchSpace | Pipeline, ...], AskFunction] + | Callable[Concatenate[SearchSpace, ...], AskFunction] # Hack, while we transit + | Callable[Concatenate[Pipeline, ...], AskFunction] # from SearchSpace to + | Callable[Concatenate[SearchSpace | Pipeline, ...], AskFunction] # Pipeline | CustomOptimizer | Literal["auto"] ), @@ -75,7 +77,16 @@ def load_optimizer( # Provided optimizer initializer case _ if callable(optimizer): keywords = extract_keyword_defaults(optimizer) - _optimizer = optimizer(space) + + # Error catch and type ignore needed while we transition from SearchSpace to + # Pipeline + try: + _optimizer = optimizer(space) # type: ignore + except TypeError as e: + raise TypeError( + f"Optimizer {optimizer} does not accept a space of type" + f" {type(space)}." + ) from e info = OptimizerInfo(name=optimizer.__name__, info=keywords) return _optimizer, info diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 216d604a8..9a93ac292 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -467,7 +467,8 @@ def random_search( def grid_search( pipeline_space: SearchSpace, - ignore_fidelity: bool = False, # noqa: FBT001, FBT002 + *, + ignore_fidelity: bool = False, ) -> GridSearch: """A simple grid search algorithm which discretizes the search space and evaluates all possible configurations. 
diff --git a/neps/optimizers/ask_and_tell.py b/neps/optimizers/ask_and_tell.py index 4eea47a8e..04f91adac 100644 --- a/neps/optimizers/ask_and_tell.py +++ b/neps/optimizers/ask_and_tell.py @@ -75,7 +75,6 @@ if TYPE_CHECKING: from neps.state.optimizer import BudgetInfo - from neps.state.pipeline_eval import EvaluatePipelineReturn def _default_worker_name() -> str: diff --git a/neps/optimizers/neps_algorithms.py b/neps/optimizers/neps_algorithms.py index 43b56b36d..bc5e8960a 100644 --- a/neps/optimizers/neps_algorithms.py +++ b/neps/optimizers/neps_algorithms.py @@ -2,7 +2,7 @@ from collections.abc import Callable, Sequence from functools import partial -from typing import TYPE_CHECKING, Literal +from typing import TYPE_CHECKING, Any, Literal import pandas as pd @@ -157,6 +157,8 @@ def neps_priorband( def neps_random_search( pipeline: Pipeline, + *_args: Any, + **_kwargs: Any, ) -> NePSRandomSearch: """A simple random search algorithm that samples configurations uniformly at random. @@ -171,6 +173,8 @@ def neps_random_search( def neps_complex_random_search( pipeline: Pipeline, + *_args: Any, + **_kwargs: Any, ) -> NePSComplexRandomSearch: """A complex random search algorithm that samples configurations uniformly at random, but allows for more complex sampling strategies. 
diff --git a/neps/optimizers/neps_priorband.py b/neps/optimizers/neps_priorband.py index d369fb73a..df5309500 100644 --- a/neps/optimizers/neps_priorband.py +++ b/neps/optimizers/neps_priorband.py @@ -26,17 +26,17 @@ class NePSPriorBandSampler: """Implement a sampler based on PriorBand.""" - """The pipeline space to optimize over.""" space: neps.space.neps_spaces.parameters.Pipeline + """The pipeline space to optimize over.""" - """The eta value to use for the SH bracket.""" eta: int + """The eta value to use for the SH bracket.""" - """The early stopping rate to use for the SH bracket.""" early_stopping_rate: int + """The early stopping rate to use for the SH bracket.""" - """The fidelity bounds.""" fid_bounds: tuple[int, int] | tuple[float, float] + """The fidelity bounds.""" def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: """Sample a configuration based on the PriorBand algorithm. diff --git a/neps/optimizers/neps_random_search.py b/neps/optimizers/neps_random_search.py index ca22c58a0..a9d8da12f 100644 --- a/neps/optimizers/neps_random_search.py +++ b/neps/optimizers/neps_random_search.py @@ -7,6 +7,7 @@ import heapq import random from collections.abc import Mapping +from dataclasses import dataclass from typing import TYPE_CHECKING from neps.space.neps_spaces.neps_space import _prepare_sampled_configs, resolve @@ -27,6 +28,7 @@ from neps.state.trial import Trial +@dataclass class NePSRandomSearch: """A simple random search optimizer for a NePS pipeline. It samples configurations randomly from the pipeline's domain and environment values. @@ -82,6 +84,7 @@ def __call__( return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) +@dataclass class NePSComplexRandomSearch: """A complex random search optimizer for a NePS pipeline. 
It samples configurations randomly from the pipeline's domain and environment values, diff --git a/neps/space/parsing.py b/neps/space/parsing.py index d7d6d5fcc..2168d4f2b 100644 --- a/neps/space/parsing.py +++ b/neps/space/parsing.py @@ -309,7 +309,7 @@ def convert_to_space( Returns: The SearchSpace object representing the search space. """ - # We quickly check ConfigSpace becuse it inherits from Mapping + # We quickly check ConfigSpace because it inherits from Mapping try: from ConfigSpace import ConfigurationSpace diff --git a/neps_examples/neps_spaces/pytorch_nn_example.ipynb b/neps_examples/neps_spaces/pytorch_nn_example.ipynb index 6b73edb37..73e63d7a2 100644 --- a/neps_examples/neps_spaces/pytorch_nn_example.ipynb +++ b/neps_examples/neps_spaces/pytorch_nn_example.ipynb @@ -170,7 +170,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "fa9cabbf", "metadata": {}, "outputs": [ From bc89f657a320cdce3cd3d294ae1e8d0c853026bd Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 6 Jul 2025 14:31:15 +0200 Subject: [PATCH 018/156] Refactor documentation and remove redundant arguments in optimizer modules --- neps/optimizers/algorithms.py | 5 +++-- neps/optimizers/ask_and_tell.py | 4 ++-- neps/optimizers/models/ftpfn.py | 1 - neps/optimizers/models/gp.py | 1 - neps/optimizers/neps_priorband.py | 2 +- neps/optimizers/priorband.py | 2 +- neps/optimizers/utils/initial_design.py | 1 - neps/space/neps_spaces/parameters.py | 3 --- 8 files changed, 7 insertions(+), 12 deletions(-) diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 9a93ac292..a24155811 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -1122,9 +1122,10 @@ def pibo( they cost, incentivising the optimizer to explore cheap, good performing configurations. This amount is modified over time. If "log", the cost will be log-transformed before being used. - !!! warning + !!! 
warning "Cost aware" + + If using `cost`, cost must be provided in the reports of the trials. - If using `cost`, cost must be provided in the reports of the trials. device: Device to use for the optimization. sample_prior_first: Whether to sample the prior configuration first. ignore_fidelity: Whether to ignore the fidelity parameter when sampling. diff --git a/neps/optimizers/ask_and_tell.py b/neps/optimizers/ask_and_tell.py index 04f91adac..a1119f00d 100644 --- a/neps/optimizers/ask_and_tell.py +++ b/neps/optimizers/ask_and_tell.py @@ -264,8 +264,8 @@ def tell( """Report the result of an evaluation back to the optimizer. Args: - config_id: The id of the configuration you got from - [`ask()`][neps.optimizers.ask_and_tell.AskAndTell.ask]. + trial: The trial to report the result for. This can be either + the trial id (a string) or the trial object itself. result: The result of the evaluation. This can be an exception, a float, or a mapping of values, similar to that which you would return from `evaluate_pipeline` when your normally diff --git a/neps/optimizers/models/ftpfn.py b/neps/optimizers/models/ftpfn.py index 72e88ce7a..08a443a54 100644 --- a/neps/optimizers/models/ftpfn.py +++ b/neps/optimizers/models/ftpfn.py @@ -131,7 +131,6 @@ def encode_ftpfn( Args: trials: The trials to encode encoder: The encoder to use - space: The search space budget_domain: The domain to use for the budgets of the FTPFN device: The device to use dtype: The dtype to use diff --git a/neps/optimizers/models/gp.py b/neps/optimizers/models/gp.py index 586ba371e..a70a235bb 100644 --- a/neps/optimizers/models/gp.py +++ b/neps/optimizers/models/gp.py @@ -248,7 +248,6 @@ def encode_trials_for_gp( Args: trials: The trials to encode. - space: The search space. encoder: The encoder to use. If `None`, one will be created. device: The device to use. 
diff --git a/neps/optimizers/neps_priorband.py b/neps/optimizers/neps_priorband.py index df5309500..5557d6831 100644 --- a/neps/optimizers/neps_priorband.py +++ b/neps/optimizers/neps_priorband.py @@ -43,7 +43,7 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: Args: table (pd.DataFrame): The table containing the configurations and their - performance. + performance. rung (int): The current rung of the optimization. Returns: diff --git a/neps/optimizers/priorband.py b/neps/optimizers/priorband.py index 9d6d23e4b..6d01581e9 100644 --- a/neps/optimizers/priorband.py +++ b/neps/optimizers/priorband.py @@ -52,7 +52,7 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: Args: table: The table of all the trials that have been run. - rung_to_sample_for: The rung to sample for. + rung: The rung to sample for. Returns: The sampled configuration. diff --git a/neps/optimizers/utils/initial_design.py b/neps/optimizers/utils/initial_design.py index 615a5a257..81a895489 100644 --- a/neps/optimizers/utils/initial_design.py +++ b/neps/optimizers/utils/initial_design.py @@ -24,7 +24,6 @@ def make_initial_design( """Generate the initial design of the optimization process. Args: - space: The search space to use. encoder: The encoder to use for encoding/decoding configurations. sampler: The sampler to use for the initial design. diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index a5ee19424..e92237fb9 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -99,9 +99,6 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: # noqa: ARG002 raise ValueError("For a Fidelity object there is nothing to resolve.") -# ------------------------------------------------- - - class Pipeline(Resolvable): """A class representing a pipeline in NePS spaces. 
It contains attributes that can be resolved into a configuration string, From 5b68925f1708f2df512c353ca9c8686db869e7f7 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 6 Jul 2025 15:11:15 +0200 Subject: [PATCH 019/156] Enhance docstrings for Resolvable, Fidelity, Pipeline, Domain, and Operation classes to improve clarity and consistency --- neps/space/neps_spaces/parameters.py | 563 +++++++++++++++++++-------- 1 file changed, 402 insertions(+), 161 deletions(-) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index e92237fb9..90cdac683 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -32,7 +32,14 @@ def get_attrs(self) -> Mapping[str, Any]: raise NotImplementedError() def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: - """Create a new resolvable object from the given attributes.""" + """Create a new resolvable object from the given attributes. + + Args: + attrs: A mapping of attribute names to their values. + + Returns: + A new resolvable object with the specified attributes. + """ raise NotImplementedError() @@ -40,6 +47,12 @@ def resolvable_is_fully_resolved(resolvable: Resolvable) -> bool: """Check if a resolvable object is fully resolved. A resolvable object is considered fully resolved if all its attributes are either not instances of Resolvable or are themselves fully resolved. + + Args: + resolvable: Resolvable: + + Returns: + bool: True if the resolvable object is fully resolved, False otherwise. """ attr_objects = resolvable.get_attrs().values() return all( @@ -50,18 +63,17 @@ def resolvable_is_fully_resolved(resolvable: Resolvable) -> bool: class Fidelity(Resolvable, Generic[T]): """A class representing a fidelity in a NePS space. - It encapsulates a domain that defines the range of values for the fidelity. - :param domain: The domain of the fidelity, which can be an Integer or Float domain. 
- :raises ValueError: If the domain has a prior defined, as fidelity domains should not - have priors. + + Attributes: + domain: The domain of the fidelity, which can be an Integer or Float domain. """ def __init__(self, domain: Integer | Float): """Initialize the Fidelity with a domain. - :param domain: The domain of the fidelity, which can be an Integer or Float - domain. - :raises ValueError: If the domain has a prior defined, as fidelity domains should - not have priors. + + Args: + domain: The domain of the fidelity, which can be an Integer or Float domain. + """ if domain.has_prior: raise ValueError(f"The domain of a Fidelity can not have priors: {domain!r}.") @@ -70,14 +82,19 @@ def __init__(self, domain: Integer | Float): @property def min_value(self) -> int | float: """Get the minimum value of the fidelity domain. - :return: The minimum value of the fidelity domain. + + Returns: + The minimum value of the fidelity domain. + """ return self._domain.min_value @property def max_value(self) -> int | float: """Get the maximum value of the fidelity domain. - :return: The maximum value of the fidelity domain. + + Returns: + The maximum value of the fidelity domain. """ return self._domain.max_value @@ -85,31 +102,42 @@ def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the fidelity as a mapping. This method collects all attributes of the fidelity class and instance, excluding private attributes and methods, and returns them as a dictionary. - :return: A mapping of attribute names to their values. - :raises ValueError: If the fidelity has no domain defined. + + Returns: + A mapping of attribute names to their values. + + Raises: + ValueError: If the fidelity has no domain defined. + """ raise ValueError("For a Fidelity object there is nothing to resolve.") def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: # noqa: ARG002 """Create a new Fidelity instance from the given attributes. 
- :param attrs: A mapping of attribute names to their values. - :return: A new Fidelity instance with the specified attributes. - :raises ValueError: If the fidelity has no domain defined. + + Args: + attrs: A mapping of attribute names to their values. + + Returns: + A new Fidelity instance with the specified attributes. + + Raises: + ValueError: If the fidelity has no domain defined. + """ raise ValueError("For a Fidelity object there is nothing to resolve.") class Pipeline(Resolvable): - """A class representing a pipeline in NePS spaces. - It contains attributes that can be resolved into a configuration string, - and it can be used to sample configurations based on defined domains. - """ + """A class representing a pipeline in NePS spaces.""" @property def fidelity_attrs(self) -> Mapping[str, Fidelity]: """Get the fidelity attributes of the pipeline. Fidelity attributes are special attributes that represent the fidelity of the pipeline. - :return: A mapping of fidelity attribute names to Fidelity objects. + + Returns: + A mapping of attribute names to Fidelity objects. """ return {k: v for k, v in self.get_attrs().items() if isinstance(v, Fidelity)} @@ -117,7 +145,9 @@ def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the pipeline as a mapping. This method collects all attributes of the pipeline class and instance, excluding private attributes and methods, and returns them as a dictionary. - :return: A mapping of attribute names to their values. + + Returns: + A mapping of attribute names to their values. """ attrs = {} @@ -139,10 +169,16 @@ def get_attrs(self) -> Mapping[str, Any]: def from_attrs(self, attrs: Mapping[str, Any]) -> Pipeline: """Create a new Pipeline instance from the given attributes. - :param attrs: A mapping of attribute names to their values. - :return: A new Pipeline instance with the specified attributes. - :raises ValueError: If the attributes do not match the pipeline's expected - structure. 
+ + Args: + attrs: A mapping of attribute names to their values. + + + Returns: + A new Pipeline instance with the specified attributes. + + Raises: + ValueError: If the attributes do not match the pipeline's expected structure. """ new_pipeline = Pipeline() for name, value in attrs.items(): @@ -159,10 +195,7 @@ class ConfidenceLevel(enum.Enum): class Domain(Resolvable, abc.ABC, Generic[T]): - """An abstract base class representing a domain in NePS spaces. - It defines the properties and methods that all domains must implement, - such as min and max values, sampling, and centered domains. - """ + """An abstract base class representing a domain in NePS spaces.""" @property @abc.abstractmethod @@ -187,6 +220,7 @@ def has_prior(self) -> bool: def prior(self) -> T: """Get the prior value of the domain. Raises ValueError if the domain has no prior defined. + """ raise NotImplementedError() @@ -195,6 +229,7 @@ def prior(self) -> T: def prior_confidence(self) -> ConfidenceLevel: """Get the confidence level of the prior. Raises ValueError if the domain has no prior defined. + """ raise NotImplementedError() @@ -204,6 +239,7 @@ def range_compatibility_identifier(self) -> str: """Get a string identifier for the range compatibility of the domain. This identifier is used to check if two domains are compatible based on their ranges. + """ raise NotImplementedError() @@ -211,6 +247,7 @@ def range_compatibility_identifier(self) -> str: def sample(self) -> T: """Sample a value from the domain. Returns a value of type T that is within the domain's range. + """ raise NotImplementedError() @@ -222,10 +259,19 @@ def centered_around( ) -> Domain[T]: """Create a new domain centered around a given value with a specified confidence level. - :param center: The value around which to center the new domain. - :param confidence: The confidence level for the new domain. - :return: A new Domain instance that is centered around the specified value. 
- :raises ValueError: If the center value is not within the domain's range. + + Args: + center: The value around which to center the new domain. + confidence: The confidence level for the new domain. + center: T: + confidence: ConfidenceLevel: + + Returns: + A new Domain instance that is centered around the specified value. + + Raises: + ValueError: If the center value is not within the domain's range. + """ raise NotImplementedError() @@ -233,16 +279,24 @@ def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the domain as a mapping. This method collects all attributes of the domain class and instance, excluding private attributes and methods, and returns them as a dictionary. - :return: A mapping of attribute names to their values. + + Returns: + A mapping of attribute names to their values. """ return {k.lstrip("_"): v for k, v in vars(self).items()} def from_attrs(self, attrs: Mapping[str, Any]) -> Domain[T]: """Create a new Domain instance from the given attributes. - :param attrs: A mapping of attribute names to their values. - :return: A new Domain instance with the specified attributes. - :raises ValueError: If the attributes do not match the domain's expected - structure. + + Args: + attrs: A mapping of attribute names to their values. + + Returns: + A new Domain instance with the specified attributes. + + Raises: + ValueError: If the attributes do not match the domain's expected structure. + """ return type(self)(**attrs) @@ -254,6 +308,25 @@ def _calculate_new_domain_bounds( center: int | float, confidence: ConfidenceLevel, ) -> tuple[int, int] | tuple[float, float]: + """Calculate new bounds for a domain based on a center value and confidence level. + This function determines the new minimum and maximum values for a domain based on + a given center value and a confidence level. It splits the domain range into chunks + and adjusts the bounds based on the specified confidence level. 
+ + Args: + number_type: The type of numbers in the domain (int or float). + min_value: The minimum value of the domain. + max_value: The maximum value of the domain. + center: The center value around which to calculate the new bounds. + confidence: The confidence level for the new bounds. + + Returns: + A tuple containing the new minimum and maximum values for the domain. + + Raises: + ValueError: If the center value is not within the domain's range or if the + number_type is not supported. + """ if center < min_value or center > max_value: raise ValueError( f"Center value {center!r} must be within domain range [{min_value!r}," @@ -294,11 +367,11 @@ def _calculate_new_domain_bounds( class Categorical(Domain[int], Generic[T]): """A domain representing a categorical choice from a set of options. - It allows for sampling from a predefined set of choices and can be centered around - a specific choice with a given confidence level. - :param choices: A tuple of choices or a Domain of choices. - :param prior_index: The index of the prior choice in the choices tuple. - :param prior_confidence: The confidence level of the prior choice. + + Attributes: + choices: A tuple of choices or a Domain of choices. + prior_index: The index of the prior choice in the choices tuple. + prior_confidence: The confidence level of the prior choice. """ def __init__( @@ -308,10 +381,12 @@ def __init__( prior_confidence: ConfidenceLevel | _Unset = _UNSET, ): """Initialize the Categorical domain with choices and optional prior. - :param choices: A tuple of choices or a Domain of choices. - :param prior_index: The index of the prior choice in the choices tuple. - :param prior_confidence: The confidence level of the prior choice. - :raises ValueError: If the choices are empty or prior_index is out of bounds. + + Args: + choices: A tuple of choices or a Domain of choices. + prior_index: The index of the prior choice in the choices tuple. 
+ prior_confidence: The confidence level of the prior choice. + """ self._choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T] if isinstance(choices, Sequence): @@ -324,36 +399,53 @@ def __init__( @property def min_value(self) -> int: """Get the minimum value of the categorical domain. - :return: The minimum index of the choices, which is always 0. + + Returns: + The minimum index of the choices, which is always 0. + """ return 0 @property def max_value(self) -> int: """Get the maximum value of the categorical domain. - :return: The maximum index of the choices, which is the length of choices minus 1. + + Returns: + The maximum index of the choices, which is the length of the choices tuple + minus one. + """ return max(len(cast(tuple, self._choices)) - 1, 0) @property def choices(self) -> tuple[T | Domain[T] | Resolvable, ...] | Domain[T]: """Get the choices available in the categorical domain. - :return: A tuple of choices or a Domain of choices. + + Returns: + A tuple of choices or a Domain of choices. + """ return self._choices @property def has_prior(self) -> bool: """Check if the categorical domain has a prior defined. - :return: True if the prior index and confidence are set, False otherwise. + + Returns: + True if the prior index and prior confidence are set, False otherwise. """ return self._prior_index is not _UNSET and self._prior_confidence is not _UNSET @property def prior(self) -> int: """Get the prior index of the categorical domain. - :return: The index of the prior choice in the choices tuple. - :raises ValueError: If the domain has no prior defined. + + Returns: + The index of the prior choice in the choices tuple. + + Raises: + ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") @@ -362,8 +454,13 @@ def prior(self) -> int: @property def prior_confidence(self) -> ConfidenceLevel: """Get the confidence level of the prior choice. 
- :return: The confidence level of the prior choice. - :raises ValueError: If the domain has no prior defined. + + Returns: + The confidence level of the prior choice. + + Raises: + ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") @@ -372,14 +469,22 @@ def prior_confidence(self) -> ConfidenceLevel: @property def range_compatibility_identifier(self) -> str: """Get a string identifier for the range compatibility of the categorical domain. - :return: A string representation of the number of choices in the domain. + + Returns: + A string representation of the number of choices in the domain. + """ return f"{len(cast(tuple, self._choices))}" def sample(self) -> int: """Sample a random index from the categorical choices. - :return: A randomly selected index from the choices tuple. - :raises ValueError: If the choices are empty. + + Returns: + A randomly selected index from the choices tuple. + + Raises: + ValueError: If the choices are empty. + """ return int(random.randint(0, len(cast(tuple[T], self._choices)) - 1)) @@ -389,11 +494,20 @@ def centered_around( confidence: ConfidenceLevel, ) -> Categorical: """Create a new categorical domain centered around a specific choice index. - :param center: The index of the choice around which to center the new domain. - :param confidence: The confidence level for the new domain. - :return: A new Categorical instance with a range centered around the specified - choice index. - :raises ValueError: If the center index is out of bounds of the choices. + + Args: + center: The index of the choice around which to center the new domain. + confidence: The confidence level for the new domain. + center: int: + confidence: ConfidenceLevel: + + Returns: + A new Categorical instance with a range centered around the specified + choice index. + + Raises: + ValueError: If the center index is out of bounds of the choices. 
+ """ new_min, new_max = cast( tuple[int, int], @@ -415,13 +529,13 @@ def centered_around( class Float(Domain[float]): """A domain representing a continuous range of floating-point values. - It allows for sampling from a range defined by minimum and maximum values, - and can be centered around a specific value with a given confidence level. - :param min_value: The minimum value of the domain. - :param max_value: The maximum value of the domain. - :param log: Whether to sample values on a logarithmic scale. - :param prior: The prior value for the domain, if any. - :param prior_confidence: The confidence level of the prior value. + + Attributes: + min_value: The minimum value of the domain. + max_value: The maximum value of the domain. + log: Whether to sample values on a logarithmic scale. + prior: The prior value for the domain, if any. + prior_confidence: The confidence level of the prior value. """ def __init__( @@ -433,12 +547,14 @@ def __init__( prior_confidence: ConfidenceLevel | _Unset = _UNSET, ): """Initialize the Float domain with min and max values, and optional prior. - :param min_value: The minimum value of the domain. - :param max_value: The maximum value of the domain. - :param log: Whether to sample values on a logarithmic scale. - :param prior: The prior value for the domain, if any. - :param prior_confidence: The confidence level of the prior value. - :raises ValueError: If min_value is greater than max_value. + + Args: + min_value: The minimum value of the domain. + max_value: The maximum value of the domain. + log: Whether to sample values on a logarithmic scale. + prior: The prior value for the domain, if any. + prior_confidence: The confidence level of the prior value. + """ self._min_value = min_value self._max_value = max_value @@ -449,31 +565,49 @@ def __init__( @property def min_value(self) -> float: """Get the minimum value of the floating-point domain. - :return: The minimum value of the domain. 
- :raises ValueError: If min_value is greater than max_value. + + Returns: + The minimum value of the domain. + + Raises: + ValueError: If min_value is greater than max_value. + """ return self._min_value @property def max_value(self) -> float: """Get the maximum value of the floating-point domain. - :return: The maximum value of the domain. - :raises ValueError: If min_value is greater than max_value. + + Returns: + The maximum value of the domain. + + Raises: + ValueError: If min_value is greater than max_value. + """ return self._max_value @property def has_prior(self) -> bool: """Check if the floating-point domain has a prior defined. - :return: True if the prior and prior confidence are set, False otherwise. + + Returns: + True if the prior and prior confidence are set, False otherwise. + """ return self._prior is not _UNSET and self._prior_confidence is not _UNSET @property def prior(self) -> float: """Get the prior value of the floating-point domain. - :return: The prior value of the domain. - :raises ValueError: If the domain has no prior defined. + + Returns: + The prior value of the domain. + + Raises: + ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") @@ -482,8 +616,13 @@ def prior(self) -> float: @property def prior_confidence(self) -> ConfidenceLevel: """Get the confidence level of the prior value. - :return: The confidence level of the prior value. - :raises ValueError: If the domain has no prior defined. + + Returns: + The confidence level of the prior value. + + Raises: + ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") @@ -493,15 +632,23 @@ def prior_confidence(self) -> ConfidenceLevel: def range_compatibility_identifier(self) -> str: """Get a string identifier for the range compatibility of the floating-point domain. 
- :return: A string representation of the minimum and maximum values, and whether - the domain is logarithmic. + + Returns: + A string representation of the minimum and maximum values, and whether + the domain is logarithmic. + """ return f"{self._min_value}_{self._max_value}_{self._log}" def sample(self) -> float: """Sample a random floating-point value from the domain. - :return: A randomly selected floating-point value within the domain's range. - :raises ValueError: If min_value is greater than max_value. + + Returns: + A randomly selected floating-point value within the domain's range. + + Raises: + ValueError: If min_value is greater than max_value. + """ if self._log: log_min = math.log(self._min_value) @@ -515,10 +662,19 @@ def centered_around( confidence: ConfidenceLevel, ) -> Float: """Create a new floating-point domain centered around a specific value. - :param center: The value around which to center the new domain. - :param confidence: The confidence level for the new domain. - :return: A new Float instance that is centered around the specified value. - :raises ValueError: If the center value is not within the domain's range. + + Args: + center: The value around which to center the new domain. + confidence: The confidence level for the new domain. + center: float: + confidence: ConfidenceLevel: + + Returns: + A new Float instance that is centered around the specified value. + + Raises: + ValueError: If the center value is not within the domain's range. + """ new_min, new_max = _calculate_new_domain_bounds( number_type=float, @@ -538,13 +694,13 @@ def centered_around( class Integer(Domain[int]): """A domain representing a range of integer values. - It allows for sampling from a range defined by minimum and maximum values, - and can be centered around a specific value with a given confidence level. - :param min_value: The minimum value of the domain. - :param max_value: The maximum value of the domain. 
- :param log: Whether to sample values on a logarithmic scale. - :param prior: The prior value for the domain, if any. - :param prior_confidence: The confidence level of the prior value. + + Attributes: + min_value: The minimum value of the domain. + max_value: The maximum value of the domain. + log: Whether to sample values on a logarithmic scale. + prior: The prior value for the domain, if any. + prior_confidence: The confidence level of the prior value. """ def __init__( @@ -556,12 +712,13 @@ def __init__( prior_confidence: ConfidenceLevel | _Unset = _UNSET, ): """Initialize the Integer domain with min and max values, and optional prior. - :param min_value: The minimum value of the domain. - :param max_value: The maximum value of the domain. - :param log: Whether to sample values on a logarithmic scale. - :param prior: The prior value for the domain, if any. - :param prior_confidence: The confidence level of the prior value. - :raises ValueError: If min_value is greater than max_value. + + Args: + min_value: The minimum value of the domain. + max_value: The maximum value of the domain. + log: Whether to sample values on a logarithmic scale. + prior: The prior value for the domain, if any. + prior_confidence: The confidence level of the prior value. """ self._min_value = min_value self._max_value = max_value @@ -572,31 +729,49 @@ def __init__( @property def min_value(self) -> int: """Get the minimum value of the integer domain. - :return: The minimum value of the domain. - :raises ValueError: If min_value is greater than max_value. + + Returns: + The minimum value of the domain. + + Raises: + ValueError: If min_value is greater than max_value. + """ return self._min_value @property def max_value(self) -> int: """Get the maximum value of the integer domain. - :return: The maximum value of the domain. - :raises ValueError: If min_value is greater than max_value. + + Returns: + The maximum value of the domain. 
+ + Raises: + ValueError: If min_value is greater than max_value. + """ return self._max_value @property def has_prior(self) -> bool: """Check if the integer domain has a prior defined. - :return: True if the prior and prior confidence are set, False otherwise. + + Returns: + True if the prior and prior confidence are set, False otherwise. + """ return self._prior is not _UNSET and self._prior_confidence is not _UNSET @property def prior(self) -> int: """Get the prior value of the integer domain. - :return: The prior value of the domain. - :raises ValueError: If the domain has no prior defined. + + Returns: + The prior value of the domain. + + Raises: + ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") @@ -605,8 +780,13 @@ def prior(self) -> int: @property def prior_confidence(self) -> ConfidenceLevel: """Get the confidence level of the prior value. - :return: The confidence level of the prior value. - :raises ValueError: If the domain has no prior defined. + + Returns: + The confidence level of the prior value. + + Raises: + ValueError: If the domain has no prior defined. + """ if not self.has_prior: raise ValueError("Domain has no prior defined.") @@ -615,16 +795,24 @@ def prior_confidence(self) -> ConfidenceLevel: @property def range_compatibility_identifier(self) -> str: """Get a string identifier for the range compatibility of the integer domain. - :return: A string representation of the minimum and maximum values, and whether - the domain is logarithmic. + + Returns: + A string representation of the minimum and maximum values, and whether + the domain is logarithmic. + """ return f"{self._min_value}_{self._max_value}_{self._log}" def sample(self) -> int: """Sample a random integer value from the domain. - :return: A randomly selected integer value within the domain's range. 
- :raises NotImplementedError: If the domain is set to sample on a logarithmic - scale, as this is not implemented yet. + + Returns: + A randomly selected integer value within the domain's range. + + Raises: + NotImplementedError: If the domain is set to sample on a logarithmic + scale, as this is not implemented yet. + """ if self._log: raise NotImplementedError("TODO.") @@ -636,10 +824,19 @@ def centered_around( confidence: ConfidenceLevel, ) -> Integer: """Create a new integer domain centered around a specific value. - :param center: The value around which to center the new domain. - :param confidence: The confidence level for the new domain. - :return: A new Integer instance that is centered around the specified value. - :raises ValueError: If the center value is not within the domain's range. + + Args: + center: The value around which to center the new domain. + confidence: The confidence level for the new domain. + center: int: + confidence: ConfidenceLevel: + + Returns: + A new Integer instance that is centered around the specified value. + + Raises: + ValueError: If the center value is not within the domain's range. + """ new_min, new_max = cast( tuple[int, int], @@ -662,13 +859,11 @@ def centered_around( class Operation(Resolvable): """A class representing an operation in a NePS space. - It encapsulates an operator (a callable or a string), arguments, and keyword - arguments. - The operator can be a function or a string representing a function name. - :param operator: The operator to be used in the operation, can be a callable or a - string. - :param args: A sequence of arguments to be passed to the operator. - :param kwargs: A mapping of keyword arguments to be passed to the operator. + + Attributes: + operator: The operator to be used in the operation, can be a callable or a string. + args: A sequence of arguments to be passed to the operator. + kwargs: A mapping of keyword arguments to be passed to the operator. 
""" def __init__( @@ -678,11 +873,13 @@ def __init__( kwargs: Mapping[str, Any] | Resolvable | None = None, ): """Initialize the Operation with an operator, arguments, and keyword arguments. - :param operator: The operator to be used in the operation, can be a callable or a - string. - :param args: A sequence of arguments to be passed to the operator. - :param kwargs: A mapping of keyword arguments to be passed to the operator. - :raises ValueError: If the operator is not callable or a string. + + Args: + operator: The operator to be used in the operation, can be a callable or a + string. + args: A sequence of arguments to be passed to the operator. + kwargs: A mapping of keyword arguments to be passed to the operator. + """ self._operator = operator @@ -701,24 +898,39 @@ def __init__( @property def operator(self) -> Callable | str: """Get the operator of the operation. - :return: The operator, which can be a callable or a string. - :raises ValueError: If the operator is not callable or a string. + + Returns: + The operator, which can be a callable or a string. + + Raises: + ValueError: If the operator is not callable or a string. + """ return self._operator @property def args(self) -> tuple[Any, ...]: """Get the arguments of the operation. - :return: A tuple of arguments to be passed to the operator. - :raises ValueError: If the args are not a tuple or Resolvable. + + Returns: + A tuple of arguments to be passed to the operator. + + Raises: + ValueError: If the args are not a tuple or Resolvable. + """ return cast(tuple[Any, ...], self._args) @property def kwargs(self) -> Mapping[str, Any]: """Get the keyword arguments of the operation. - :return: A mapping of keyword arguments to be passed to the operator. - :raises ValueError: If the kwargs are not a mapping or Resolvable. + + Returns: + A mapping of keyword arguments to be passed to the operator. + + Raises: + ValueError: If the kwargs are not a mapping or Resolvable. 
+ """ return cast(Mapping[str, Any], self._kwargs) @@ -726,7 +938,10 @@ def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the operation as a mapping. This method collects all attributes of the operation class and instance, excluding private attributes and methods, and returns them as a dictionary. - :return: A mapping of attribute names to their values. + + Returns: + A mapping of attribute names to their values. + """ # TODO: [lum] simplify this. We know the fields. Maybe other places too. result: dict[str, Any] = {} @@ -745,10 +960,16 @@ def get_attrs(self) -> Mapping[str, Any]: def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: """Create a new Operation instance from the given attributes. - :param attrs: A mapping of attribute names to their values. - :return: A new Operation instance with the specified attributes. - :raises ValueError: If the attributes do not match the operation's expected - structure. + + Args: + attrs: A mapping of attribute names to their values. + + Returns: + A new Operation instance with the specified attributes. + + Raises: + ValueError: If the attributes do not match the operation's expected structure. + """ # TODO: [lum] simplify this. We know the fields. Maybe other places too. final_attrs: dict[str, Any] = {} @@ -768,37 +989,50 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: class Resampled(Resolvable): """A class representing a resampling operation in a NePS space. - It can either be a resolvable object or a string representing a resampling by name. - :param source: The source of the resampling, can be a resolvable object or a string. + + Attributes: + source: The source of the resampling, which can be a resolvable object or a + string. """ def __init__(self, source: Resolvable | str): """Initialize the Resampled object with a source. - :param source: The source of the resampling, which can be a resolvable object or - a string. 
- :raises ValueError: If the source is not a resolvable object or a string. + + Args: + source: The source of the resampling, can be a resolvable object or a string. """ self._source = source @property def source(self) -> Resolvable | str: """Get the source of the resampling. - :return: The source of the resampling, which can be a resolvable object or a - string. + + Returns: + The source of the resampling, which can be a resolvable object or a string + """ return self._source @property def is_resampling_by_name(self) -> bool: """Check if the resampling is by name. - :return: True if the source is a string, False otherwise. + + Returns: + True if the source is a string, indicating a resampling by name, + False if the source is a resolvable object. + """ return isinstance(self._source, str) def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the resampling source as a mapping. - :return: A mapping of attribute names to their values. - :raises ValueError: If the resampling is by name or the source is not resolvable. + + Returns: + A mapping of attribute names to their values. + + Raises: + ValueError: If the resampling is by name or the source is not resolvable. + """ if self.is_resampling_by_name: raise ValueError( @@ -812,9 +1046,16 @@ def get_attrs(self) -> Mapping[str, Any]: def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: """Create a new resolvable object from the given attributes. - :param attrs: A mapping of attribute names to their values. - :return: A new resolvable object created from the specified attributes. - :raises ValueError: If the resampling is by name or the source is not resolvable. + + Args: + attrs: A mapping of attribute names to their values. + + Returns: + A new resolvable object created from the specified attributes. + + Raises: + ValueError: If the resampling is by name or the source is not resolvable. 
+ """ if self.is_resampling_by_name: raise ValueError( From e3dd79e8591d05b38c6c69f9f3c5b62f6f2dc312 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 6 Jul 2025 22:04:02 +0200 Subject: [PATCH 020/156] Refactor SamplingResolutionContext and Domain classes for improved clarity and consistency - Updated docstrings in SamplingResolutionContext to use Args and Returns sections for better readability. - Changed parameter names from `prior_index` to `prior` in Categorical class to enhance clarity. - Introduced a new function `convert_confidence_level` to handle string to ConfidenceLevel conversion. - Updated tests to reflect changes in parameter names from `prior_index` to `prior`. - Enhanced docstrings across various classes and methods in the sampling module for consistency. --- neps/optimizers/neps_algorithms.py | 6 + neps/optimizers/neps_random_search.py | 80 +++-- neps/space/neps_spaces/config_string.py | 84 ++++-- neps/space/neps_spaces/neps_space.py | 240 ++++++++++----- neps/space/neps_spaces/parameters.py | 74 +++-- neps/space/neps_spaces/sampling.py | 280 +++++++++++------- .../test_neps_space/test_domain__centering.py | 4 +- .../test_neps_space/test_neps_integration.py | 10 +- .../test_search_space__reuse_arch_elements.py | 4 +- 9 files changed, 523 insertions(+), 259 deletions(-) diff --git a/neps/optimizers/neps_algorithms.py b/neps/optimizers/neps_algorithms.py index bc5e8960a..3a5cf65eb 100644 --- a/neps/optimizers/neps_algorithms.py +++ b/neps/optimizers/neps_algorithms.py @@ -1,3 +1,9 @@ +"""NePS Algorithms +=========== +This module provides implementations of various NePS algorithms for optimizing pipeline +spaces. 
+""" + from __future__ import annotations from collections.abc import Callable, Sequence diff --git a/neps/optimizers/neps_random_search.py b/neps/optimizers/neps_random_search.py index a9d8da12f..3b6904268 100644 --- a/neps/optimizers/neps_random_search.py +++ b/neps/optimizers/neps_random_search.py @@ -32,14 +32,22 @@ class NePSRandomSearch: """A simple random search optimizer for a NePS pipeline. It samples configurations randomly from the pipeline's domain and environment values. - :param pipeline: The pipeline to optimize, which should be a Pipeline object. - :raises ValueError: If the pipeline is not a Pipeline object. + + Args: + pipeline: The pipeline to optimize, which should be a Pipeline object. + + Raises: + ValueError: If the pipeline is not a Pipeline object. """ def __init__(self, pipeline: Pipeline): """Initialize the RandomSearch optimizer with a pipeline. - :param pipeline: The pipeline to optimize, which should be a Pipeline object. - :raises ValueError: If the pipeline is not a Pipeline object. + + Args: + pipeline: The pipeline to optimize, which should be a Pipeline object. + + Raises: + ValueError: If the pipeline is not a Pipeline object. """ self._pipeline = pipeline @@ -58,15 +66,21 @@ def __call__( ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: """Sample configurations randomly from the pipeline's domain and environment values. - :param trials: A mapping of trial IDs to Trial objects, representing previous - trials. - :param budget_info: The budget information for the optimization process. - :param n: The number of configurations to sample. If None, a single configuration - will be sampled. - :return: A SampledConfig object or a list of SampledConfig objects, depending - on the value of n. - :raises ValueError: If the pipeline is not a Pipeline object or if the trials are - not a valid mapping of trial IDs to Trial objects. + + Args: + trials: A mapping of trial IDs to Trial objects, representing previous + trials. 
+ budget_info: The budget information for the optimization process. + n: The number of configurations to sample. If None, a single configuration + will be sampled. + + Returns: + A SampledConfig object or a list of SampledConfig objects, depending + on the value of n. + + Raises: + ValueError: If the pipeline is not a Pipeline object or if the trials are + not a valid mapping of trial IDs to Trial objects. """ n_prev_trials = len(trials) n_requested = 1 if n is None else n @@ -89,14 +103,22 @@ class NePSComplexRandomSearch: """A complex random search optimizer for a NePS pipeline. It samples configurations randomly from the pipeline's domain and environment values, and also performs mutations and crossovers based on previous successful trials. - :param pipeline: The pipeline to optimize, which should be a Pipeline object. - :raises ValueError: If the pipeline is not a Pipeline object. + + Args: + pipeline: The pipeline to optimize, which should be a Pipeline object. + + Raises: + ValueError: If the pipeline is not a Pipeline object. """ def __init__(self, pipeline: Pipeline): """Initialize the ComplexRandomSearch optimizer with a pipeline. - :param pipeline: The pipeline to optimize, which should be a Pipeline object. - :raises ValueError: If the pipeline is not a Pipeline object. + + Args: + pipeline: The pipeline to optimize, which should be a Pipeline object. + + Raises: + ValueError: If the pipeline is not a Pipeline object. """ self._pipeline = pipeline @@ -126,15 +148,21 @@ def __call__( """Sample configurations randomly from the pipeline's domain and environment values, and also perform mutations and crossovers based on previous successful trials. - :param trials: A mapping of trial IDs to Trial objects, representing previous - trials. - :param budget_info: The budget information for the optimization process. - :param n: The number of configurations to sample. If None, a single configuration - will be sampled. 
- :return: A SampledConfig object or a list of SampledConfig objects, depending - on the value of n. - :raises ValueError: If the pipeline is not a Pipeline object or if the trials are - not a valid mapping of trial IDs to Trial objects. + + Args: + trials: A mapping of trial IDs to Trial objects, representing previous + trials. + budget_info: The budget information for the optimization process. + n: The number of configurations to sample. If None, a single configuration + will be sampled. + + Returns: + A SampledConfig object or a list of SampledConfig objects, depending + on the value of n. + + Raises: + ValueError: If the pipeline is not a Pipeline object or if the trials are + not a valid mapping of trial IDs to Trial objects. """ n_prev_trials = len(trials) n_requested = 1 if n is None else n diff --git a/neps/space/neps_spaces/config_string.py b/neps/space/neps_spaces/config_string.py index 8e363f581..62ed3ec55 100644 --- a/neps/space/neps_spaces/config_string.py +++ b/neps/space/neps_spaces/config_string.py @@ -16,14 +16,16 @@ @dataclasses.dataclass(frozen=True) class UnwrappedConfigStringPart: """A data class representing a part of an unwrapped configuration string. - :param level: The hierarchy level of this part in the configuration string. - :param opening_index: The index of the opening parenthesis in the original string. - :param operator: The operator of this part, which is the first word in the - parenthesis. - :param hyperparameters: The hyperparameters of this part, if any, enclosed in curly - braces. - :param operands: The operands of this part, which are the remaining content in the - parenthesis. + + Args: + level: The hierarchy level of this part in the configuration string. + opening_index: The index of the opening parenthesis in the original string. + operator: The operator of this part, which is the first word in the + parenthesis. + hyperparameters: The hyperparameters of this part, if any, enclosed in curly + braces. 
+ operands: The operands of this part, which are the remaining content in the + parenthesis. """ level: int @@ -37,9 +39,15 @@ class UnwrappedConfigStringPart: def unwrap_config_string(config_string: str) -> tuple[UnwrappedConfigStringPart, ...]: """For a given config string, gets the parenthetic contents of it and uses them to construct objects of type `UnwrappedConfigStringPart`. - First unwraps a given parenthesised config_string into parts. Then it converts these parts into objects with structured information. + + Args: + config_string: The configuration string to be unwrapped. + + Returns: + A tuple of `UnwrappedConfigStringPart` objects representing the unwrapped + configuration string. """ # A workaround needed since in the existing configurations # generated by previous methods, e.g. the `resBlock resBlock` and `resBlock` items @@ -102,10 +110,14 @@ def wrap_config_into_string( max_level: int | None = None, ) -> str: """For a given unwrapped config, returns the string representing it. - :param unwrapped_config: The unwrapped config - :param max_level: - An optional int telling which is the maximal considered level. - Bigger levels are ignored. + + Args: + unwrapped_config: The unwrapped config + max_level: An optional int telling which is the maximal considered level. + Bigger levels are ignored. + + Returns: + The string representation of the unwrapped config. """ result = [] current_level = 0 @@ -156,8 +168,12 @@ class ConfigString: def __init__(self, config_string: str) -> None: """Initialize the ConfigString with a given configuration string. - :param config_string: The configuration string to be wrapped. - :raises ValueError: If the config_string is None or empty. + + Args: + config_string: The configuration string to be wrapped. + + Raises: + ValueError: If the config_string is None or empty. 
""" if config_string is None or len(config_string) == 0: raise ValueError(f"Invalid config string: {config_string}") @@ -174,9 +190,13 @@ def __init__(self, config_string: str) -> None: @property def unwrapped(self) -> tuple[UnwrappedConfigStringPart, ...]: """Get the unwrapped representation of the configuration string. - :return: A tuple of UnwrappedConfigStringPart objects representing the unwrapped - config. - :raises ValueError: If there is an error unwrapping the config string. + + Returns: + A tuple of UnwrappedConfigStringPart objects representing the unwrapped + config. + + Raises: + ValueError: If there is an error unwrapping the config string. """ # If the unwrapped is already cached, return it if self._unwrapped is not None: @@ -202,8 +222,12 @@ def unwrapped(self) -> tuple[UnwrappedConfigStringPart, ...]: @property def max_hierarchy_level(self) -> int: """Get the maximum hierarchy level of the configuration string. - :return: The maximum hierarchy level of the configuration string. - :raises ValueError: If the maximum hierarchy level is invalid. + + Returns: + The maximum hierarchy level of the configuration string. + + Raises: + ValueError: If the maximum hierarchy level is invalid. """ if self._max_hierarchy_level is not None: return self._max_hierarchy_level @@ -218,10 +242,16 @@ def max_hierarchy_level(self) -> int: def at_hierarchy_level(self, level: int) -> ConfigString: """Get the configuration string at a specific hierarchy level. - :param level: The hierarchy level to retrieve the configuration string for. - :return: A ConfigString object representing the configuration at the specified - hierarchy level. - :raises ValueError: If the level is invalid (0 or out of bounds). + + Args: + level: The hierarchy level to retrieve the configuration string for. + + Returns: + A ConfigString object representing the configuration at the specified + hierarchy level. + + Raises: + ValueError: If the level is invalid (0 or out of bounds). 
""" if level == 0: raise ValueError(f"Invalid value for `level`. Received level == 0: {level}") @@ -254,8 +284,10 @@ def at_hierarchy_level(self, level: int) -> ConfigString: def pretty_format(self) -> str: """Get a pretty formatted string representation of the configuration string. - :return: A string representation of the configuration string with indentation - based on the hierarchy level of each part. + + Returns: + A string representation of the configuration string with indentation + based on the hierarchy level of each part. """ format_str_with_kwargs = ( "{indent}{item.level:0>2d} :: {item.operator} {item.hyperparameters}" diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 7cbf38da5..a8eaed5c0 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -38,14 +38,17 @@ class SamplingResolutionContext: """A context for resolving samplings in a NePS space. It manages the resolution root, domain sampler, environment values, and keeps track of samplings made and resolved objects. - :param resolution_root: The root of the resolution, which should be a Resolvable - object. - :param domain_sampler: The DomainSampler to use for sampling from Domain objects. - :param environment_values: A mapping of environment values that are fixed and not - related - to samplings. These values can be used in the resolution process. - :raises ValueError: If the resolution_root is not a Resolvable, or if the - domain_sampler is not a DomainSampler, or if the environment_values is not a Mapping. + + Args: + resolution_root: The root of the resolution, which should be a Resolvable + object. + domain_sampler: The DomainSampler to use for sampling from Domain objects. + environment_values: A mapping of environment values that are fixed and not + related to samplings. These values can be used in the resolution process. 
+ + Raises: + ValueError: If the resolution_root is not a Resolvable, or if the domain_sampler + is not a DomainSampler, or if the environment_values is not a Mapping. """ def __init__( @@ -57,14 +60,18 @@ def __init__( ): """Initialize the SamplingResolutionContext with a resolution root, domain sampler, and environment values. - :param resolution_root: The root of the resolution, which should be a Resolvable - object. - :param domain_sampler: The DomainSampler to use for sampling from Domain objects. - :param environment_values: A mapping of environment values that are fixed and not - related to samplings. These values can be used in the resolution process. - :raises ValueError: If the resolution_root is not a Resolvable, or if the - domain_sampler is not a DomainSampler, or if the environment_values is not a - Mapping. + + Args: + resolution_root: The root of the resolution, which should be a Resolvable + object. + domain_sampler: The DomainSampler to use for sampling from Domain objects. + environment_values: A mapping of environment values that are fixed and not + related to samplings. These values can be used in the resolution process. + + Raises: + ValueError: If the resolution_root is not a Resolvable, or if the + domain_sampler is not a DomainSampler, or if the environment_values is + not a Mapping. """ if not isinstance(resolution_root, Resolvable): raise ValueError( @@ -108,30 +115,40 @@ def __init__( @property def resolution_root(self) -> Resolvable: """Get the root of the resolution. - :return: The root of the resolution, which should be a Resolvable object. + + Returns: + The root of the resolution, which should be a Resolvable object. """ return self._resolution_root @property def samplings_made(self) -> Mapping[str, Any]: """Get the samplings made during the resolution process. - :return: A mapping of paths to sampled values. + + Returns: + A mapping of paths to sampled values. 
""" return self._samplings_made @property def environment_values(self) -> Mapping[str, Any]: """Get the environment values that are fixed and not related to samplings. - :return: A mapping of environment variable names to their values. + + Returns: + A mapping of environment variable names to their values. """ return self._environment_values @contextlib.contextmanager def resolving(self, _obj: Any, name: str) -> Generator[None]: """Context manager for resolving an object in the current resolution context. - :param _obj: The object being resolved, can be any type. - :param name: The name of the object being resolved, used for debugging. - :raises ValueError: If the name is not a valid string. + + Args: + _obj: The object being resolved, can be any type. + name: The name of the object being resolved, used for debugging. + + Raises: + ValueError: If the name is not a valid string. """ if not name or not isinstance(name, str): raise ValueError( @@ -150,17 +167,25 @@ def resolving(self, _obj: Any, name: str) -> Generator[None]: def was_already_resolved(self, obj: Any) -> bool: """Check if the given object was already resolved in the current context. - :param obj: The object to check if it was already resolved. - :return: True if the object was already resolved, False otherwise. + + Args: + obj: The object to check if it was already resolved. + + Returns: + True if the object was already resolved, False otherwise. """ return obj in self._resolved_objects def add_resolved(self, original: Any, resolved: Any) -> None: """Add a resolved object to the context. - :param original: The original object that was resolved. - :param resolved: The resolved value of the original object. - :raises ValueError: If the original object was already resolved or if it is a - Resampled. + + Args: + original: The original object that was resolved. + resolved: The resolved value of the original object. 
+ + Raises: + ValueError: If the original object was already resolved or if it is a + Resampled. """ if self.was_already_resolved(original): raise ValueError( @@ -177,9 +202,15 @@ def add_resolved(self, original: Any, resolved: Any) -> None: def get_resolved(self, obj: Any) -> Any: """Get the resolved value for the given object. - :param obj: The object for which to get the resolved value. - :return: The resolved value of the object. - :raises ValueError: If the object was not already resolved in the context. + + Args: + obj: The object for which to get the resolved value. + + Returns: + The resolved value of the object. + + Raises: + ValueError: If the object was not already resolved in the context. """ try: return self._resolved_objects[obj] @@ -190,10 +221,16 @@ def get_resolved(self, obj: Any) -> Any: def sample_from(self, domain_obj: Domain) -> Any: """Sample a value from the given domain object. - :param domain_obj: The domain object from which to sample a value. - :return: The sampled value from the domain object. - :raises ValueError: If the domain object was already resolved or if the path - has already been sampled from. + + Args: + domain_obj: The domain object from which to sample a value. + + Returns: + The sampled value from the domain object. + + Raises: + ValueError: If the domain object was already resolved or if the path + has already been sampled from. """ # Each `domain_obj` is only ever sampled from once. # This is okay and the expected behavior. @@ -236,9 +273,15 @@ def sample_from(self, domain_obj: Domain) -> Any: def get_value_from_environment(self, var_name: str) -> Any: """Get a value from the environment variables. - :param var_name: The name of the environment variable to get the value from. - :return: The value of the environment variable. - :raises ValueError: If the environment variable is not found in the context. + + Args: + var_name: The name of the environment variable to get the value from. 
+ + Returns: + The value of the environment variable. + + Raises: + ValueError: If the environment variable is not found in the context. """ try: return self._environment_values[var_name] @@ -252,11 +295,6 @@ class SamplingResolver: """A class responsible for resolving samplings in a NePS space. It uses a SamplingResolutionContext to manage the resolution process, and a DomainSampler to sample values from Domain objects. - :param resolver: The resolver to use for resolving objects. - This should be a callable that takes an object and a context and returns the resolved - object. - :raises ValueError: If the resolver is not a callable or if it is not a - DomainSampler or a SamplingResolutionContext. """ def __call__( @@ -267,14 +305,20 @@ def __call__( ) -> tuple[Resolvable, SamplingResolutionContext]: """Resolve the given object in the context of the provided domain sampler and environment values. - :param obj: The Resolvable object to resolve. - :param domain_sampler: The DomainSampler to use for sampling from Domain objects. - :param environment_values: A mapping of environment values that are fixed and not - related to samplings. - :return: A tuple containing the resolved object and the - SamplingResolutionContext. - :raises ValueError: If the object is not a Resolvable, or if the domain_sampler - is not a DomainSampler, or if the environment_values is not a Mapping. + + Args: + obj: The Resolvable object to resolve. + domain_sampler: The DomainSampler to use for sampling from Domain objects. + environment_values: A mapping of environment values that are fixed and not + related to samplings. + + Returns: + A tuple containing the resolved object and the + SamplingResolutionContext. + + Raises: + ValueError: If the object is not a Resolvable, or if the domain_sampler + is not a DomainSampler, or if the environment_values is not a Mapping. 
""" context = SamplingResolutionContext( resolution_root=obj, @@ -581,14 +625,20 @@ def resolve( environment_values: Mapping[str, Any] | None = None, ) -> tuple[P, SamplingResolutionContext]: """Resolve a NePS pipeline with the given domain sampler and environment values. - :param pipeline: The pipeline to resolve, which should be a Pipeline object. - :param domain_sampler: The DomainSampler to use for sampling from Domain objects. - If None, a RandomSampler with no predefined values will be used. - :param environment_values: A mapping of environment variable names to their values. - If None, an empty mapping will be used. - :return: A tuple containing the resolved pipeline and the SamplingResolutionContext. - :raises ValueError: If the pipeline is not a Pipeline object or if the domain_sampler - is not a DomainSampler or if the environment_values is not a Mapping. + + Args: + pipeline: The pipeline to resolve, which should be a Pipeline object. + domain_sampler: The DomainSampler to use for sampling from Domain objects. + If None, a RandomSampler with no predefined values will be used. + environment_values: A mapping of environment variable names to their values. + If None, an empty mapping will be used. + + Returns: + A tuple containing the resolved pipeline and the SamplingResolutionContext. + + Raises: + ValueError: If the pipeline is not a Pipeline object or if the domain_sampler + is not a DomainSampler or if the environment_values is not a Mapping. """ if domain_sampler is None: # By default, use a random sampler with no predefined values. @@ -612,9 +662,15 @@ def resolve( def convert_operation_to_callable(operation: Operation) -> Callable: """Convert an Operation to a callable that can be executed. - :param operation: The Operation to convert. - :return: A callable that represents the operation. - :raises ValueError: If the operation is not a valid Operation object. + + Args: + operation: The Operation to convert. 
+ + Returns: + A callable that represents the operation. + + Raises: + ValueError: If the operation is not a valid Operation object. """ operator = cast(Callable, operation.operator) @@ -668,9 +724,15 @@ def _operation_to_unwrapped_config( def convert_operation_to_string(operation: Operation) -> str: """Convert an Operation to a string representation. - :param operation: The Operation to convert. - :return: A string representation of the operation. - :raises ValueError: If the operation is not a valid Operation object. + + Args: + operation: The Operation to convert. + + Returns: + A string representation of the operation. + + Raises: + ValueError: If the operation is not a valid Operation object. """ unwrapped_config = tuple(_operation_to_unwrapped_config(operation)) return config_string.wrap_config_into_string(unwrapped_config) @@ -683,8 +745,6 @@ class NepsCompatConverter: """A class to convert between NePS configurations and NEPS-compatible configurations. It provides methods to convert a SamplingResolutionContext to a NEPS-compatible config and to convert a NEPS-compatible config back to a SamplingResolutionContext. - :param resolution_context: The SamplingResolutionContext to convert. - :raises ValueError: If the resolution_context is not a SamplingResolutionContext. """ _SAMPLING_PREFIX = "SAMPLING__" @@ -704,9 +764,15 @@ def to_neps_config( resolution_context: SamplingResolutionContext, ) -> Mapping[str, Any]: """Convert a SamplingResolutionContext to a NEPS-compatible config. - :param resolution_context: The SamplingResolutionContext to convert. - :return: A mapping of NEPS-compatible configuration keys to their values. - :raises ValueError: If the resolution_context is not a SamplingResolutionContext. + + Args: + resolution_context: The SamplingResolutionContext to convert. + + Returns: + A mapping of NEPS-compatible configuration keys to their values. + + Raises: + ValueError: If the resolution_context is not a SamplingResolutionContext. 
""" config: dict[str, Any] = {} @@ -726,10 +792,16 @@ def from_neps_config( config: Mapping[str, Any], ) -> _FromNepsConfigResult: """Convert a NEPS-compatible config to a SamplingResolutionContext. - :param config: A mapping of NEPS-compatible configuration keys to their values. - :return: A _FromNepsConfigResult containing predefined samplings, - environment values, and extra kwargs. - :raises ValueError: If the config is not a valid NEPS-compatible config. + + Args: + config: A mapping of NEPS-compatible configuration keys to their values. + + Returns: + A _FromNepsConfigResult containing predefined samplings, + environment values, and extra kwargs. + + Raises: + ValueError: If the config is not a valid NEPS-compatible config. """ predefined_samplings = {} environment_values = {} @@ -784,13 +856,19 @@ def adjust_evaluation_pipeline_for_neps_space( """Adjust the evaluation pipeline to work with a NePS space. This function wraps the evaluation pipeline to sample from the NePS space and convert the sampled pipeline to a format compatible with the evaluation pipeline. - :param evaluation_pipeline: The evaluation pipeline to adjust. - :param pipeline_space: The NePS pipeline space to sample from. - :param operation_converter: A callable to convert Operation objects to a format - compatible with the evaluation pipeline. - :return: A wrapped evaluation pipeline that samples from the NePS space. - :raises ValueError: If the evaluation_pipeline is not callable or if the - pipeline_space is not a Pipeline object. + + Args: + evaluation_pipeline: The evaluation pipeline to adjust. + pipeline_space: The NePS pipeline space to sample from. + operation_converter: A callable to convert Operation objects to a format + compatible with the evaluation pipeline. + + Returns: + A wrapped evaluation pipeline that samples from the NePS space. + + Raises: + ValueError: If the evaluation_pipeline is not callable or if the + pipeline_space is not a Pipeline object. 
""" @functools.wraps(evaluation_pipeline) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 90cdac683..d0bbfd3f9 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -11,7 +11,7 @@ import math import random from collections.abc import Callable, Mapping, Sequence -from typing import Any, Generic, Protocol, TypeVar, cast, runtime_checkable +from typing import Any, Generic, Literal, Protocol, TypeVar, cast, runtime_checkable T = TypeVar("T") @@ -194,6 +194,26 @@ class ConfidenceLevel(enum.Enum): HIGH = "high" +def convert_confidence_level(confidence: str) -> ConfidenceLevel: + """Convert a string representation of confidence level to ConfidenceLevel enum. + + Args: + confidence: A string representing the confidence level, e.g., "low", "medium", + "high". + + Returns: + ConfidenceLevel: The corresponding ConfidenceLevel enum value. + + Raises: + ValueError: If the input string does not match any of the defined confidence + levels. + """ + try: + return ConfidenceLevel[confidence.upper()] + except KeyError as e: + raise ValueError(f"Invalid confidence level: {confidence}") from e + + class Domain(Resolvable, abc.ABC, Generic[T]): """An abstract base class representing a domain in NePS spaces.""" @@ -370,21 +390,23 @@ class Categorical(Domain[int], Generic[T]): Attributes: choices: A tuple of choices or a Domain of choices. - prior_index: The index of the prior choice in the choices tuple. + prior: The index of the prior choice in the choices tuple. prior_confidence: The confidence level of the prior choice. """ def __init__( self, choices: tuple[T | Domain[T] | Resolvable | Any, ...] 
| Domain[T], - prior_index: int | Domain[int] | _Unset = _UNSET, - prior_confidence: ConfidenceLevel | _Unset = _UNSET, + prior: int | Domain[int] | _Unset = _UNSET, + prior_confidence: ( + ConfidenceLevel | Literal["low", "medium", "high"] | _Unset + ) = _UNSET, ): """Initialize the Categorical domain with choices and optional prior. Args: choices: A tuple of choices or a Domain of choices. - prior_index: The index of the prior choice in the choices tuple. + prior: The index of the prior choice in the choices tuple. prior_confidence: The confidence level of the prior choice. """ @@ -393,8 +415,12 @@ def __init__( self._choices = tuple(choice for choice in choices) else: self._choices = choices - self._prior_index = prior_index - self._prior_confidence = prior_confidence + self._prior = prior + self._prior_confidence = ( + convert_confidence_level(prior_confidence) + if isinstance(prior_confidence, str) + else prior_confidence + ) @property def min_value(self) -> int: @@ -432,9 +458,9 @@ def has_prior(self) -> bool: """Check if the categorical domain has a prior defined. Returns: - True if the prior index and prior confidence are set, False otherwise. + True if the prior and prior confidence are set, False otherwise. 
""" - return self._prior_index is not _UNSET and self._prior_confidence is not _UNSET + return self._prior is not _UNSET and self._prior_confidence is not _UNSET @property def prior(self) -> int: @@ -449,7 +475,7 @@ def prior(self) -> int: """ if not self.has_prior: raise ValueError("Domain has no prior defined.") - return int(cast(int, self._prior_index)) + return int(cast(int, self._prior)) @property def prior_confidence(self) -> ConfidenceLevel: @@ -522,7 +548,7 @@ def centered_around( new_choices = cast(tuple, self._choices)[new_min : new_max + 1] return Categorical( choices=new_choices, - prior_index=new_choices.index(cast(tuple, self._choices)[center]), + prior=new_choices.index(cast(tuple, self._choices)[center]), prior_confidence=confidence, ) @@ -544,7 +570,9 @@ def __init__( max_value: float, log: bool = False, # noqa: FBT001, FBT002 prior: float | _Unset = _UNSET, - prior_confidence: ConfidenceLevel | _Unset = _UNSET, + prior_confidence: ( + Literal["low", "medium", "high"] | ConfidenceLevel | _Unset + ) = _UNSET, ): """Initialize the Float domain with min and max values, and optional prior. @@ -560,7 +588,11 @@ def __init__( self._max_value = max_value self._log = log self._prior = prior - self._prior_confidence = prior_confidence + self._prior_confidence = ( + convert_confidence_level(prior_confidence) + if isinstance(prior_confidence, str) + else prior_confidence + ) @property def min_value(self) -> float: @@ -709,7 +741,9 @@ def __init__( max_value: int, log: bool = False, # noqa: FBT001, FBT002 prior: int | _Unset = _UNSET, - prior_confidence: ConfidenceLevel | _Unset = _UNSET, + prior_confidence: ( + Literal["low", "medium", "high"] | ConfidenceLevel | _Unset + ) = _UNSET, ): """Initialize the Integer domain with min and max values, and optional prior. 
@@ -724,7 +758,11 @@ def __init__( self._max_value = max_value self._log = log self._prior = prior - self._prior_confidence = prior_confidence + self._prior_confidence = ( + convert_confidence_level(prior_confidence) + if isinstance(prior_confidence, str) + else prior_confidence + ) @property def min_value(self) -> int: @@ -811,7 +849,7 @@ def sample(self) -> int: Raises: NotImplementedError: If the domain is set to sample on a logarithmic - scale, as this is not implemented yet. + scale, as this is not implemented yet. """ if self._log: @@ -876,7 +914,7 @@ def __init__( Args: operator: The operator to be used in the operation, can be a callable or a - string. + string. args: A sequence of arguments to be passed to the operator. kwargs: A mapping of keyword arguments to be passed to the operator. @@ -992,7 +1030,7 @@ class Resampled(Resolvable): Attributes: source: The source of the resampling, which can be a resolvable object or a - string. + string. """ def __init__(self, source: Resolvable | str): diff --git a/neps/space/neps_spaces/sampling.py b/neps/space/neps_spaces/sampling.py index 56f7d7109..a0ed14704 100644 --- a/neps/space/neps_spaces/sampling.py +++ b/neps/space/neps_spaces/sampling.py @@ -29,10 +29,16 @@ def __call__( current_path: str, ) -> T: """Sample a value from the given domain. - :param domain_obj: The domain object to sample from. - :param current_path: The current path in the resolution context. - :return: A sampled value of type T from the domain. - :raises NotImplementedError: If the method is not implemented. + + Args: + domain_obj: The domain object to sample from. + current_path: The current path in the resolution context. + + Returns: + A sampled value of type T from the domain. + + Raises: + NotImplementedError: If the method is not implemented. """ raise NotImplementedError() @@ -40,7 +46,9 @@ def __call__( class OnlyPredefinedValuesSampler(DomainSampler): """A sampler that only returns predefined values for a given path. 
If the path is not found in the predefined values, it raises a ValueError. - :param predefined_samplings: A mapping of paths to predefined values. + + Args: + predefined_samplings: A mapping of paths to predefined values. """ def __init__( @@ -48,8 +56,12 @@ def __init__( predefined_samplings: Mapping[str, Any], ): """Initialize the sampler with predefined samplings. - :param predefined_samplings: A mapping of paths to predefined values. - :raises ValueError: If predefined_samplings is empty. + + Args: + predefined_samplings: A mapping of paths to predefined values. + + Raises: + ValueError: If predefined_samplings is empty. """ self._predefined_samplings = predefined_samplings @@ -60,10 +72,16 @@ def __call__( current_path: str, ) -> T: """Sample a value from the predefined samplings for the given path. - :param domain_obj: The domain object, not used in this sampler. - :param current_path: The path for which to sample a value. - :return: The predefined value for the given path. - :raises ValueError: If the current path is not in the predefined samplings. + + Args: + domain_obj: The domain object, not used in this sampler. + current_path: The path for which to sample a value. + + Returns: + The predefined value for the given path. + + Raises: + ValueError: If the current path is not in the predefined samplings. """ if current_path not in self._predefined_samplings: raise ValueError(f"No predefined value for path: {current_path!r}.") @@ -73,9 +91,11 @@ def __call__( class RandomSampler(DomainSampler): """A sampler that randomly samples from a predefined set of values. If the current path is not in the predefined values, it samples from the domain. - :param predefined_samplings: A mapping of paths to predefined values. - This sampler will use these values if available, otherwise it will sample from the - domain. + + Args: + predefined_samplings: A mapping of paths to predefined values. 
+ This sampler will use these values if available, otherwise it will sample + from the domain. """ def __init__( @@ -83,8 +103,11 @@ def __init__( predefined_samplings: Mapping[str, Any], ): """Initialize the sampler with predefined samplings. - :param predefined_samplings: A mapping of paths to predefined values. - :raises + + Args: + predefined_samplings: A mapping of paths to predefined values. + + Raises: ValueError: If predefined_samplings is empty. """ self._predefined_samplings = predefined_samplings @@ -96,12 +119,18 @@ def __call__( current_path: str, ) -> T: """Sample a value from the predefined samplings or the domain. - :param domain_obj: The domain object from which to sample. - :param current_path: The path for which to sample a value. - :return: A sampled value, either from the predefined samplings or from the - domain. - :raises ValueError: If the current path is not in the predefined samplings and - the domain does not have a prior defined. + + Args: + domain_obj: The domain object from which to sample. + current_path: The path for which to sample a value. + + Returns: + A sampled value, either from the predefined samplings or from the + domain. + + Raises: + ValueError: If the current path is not in the predefined samplings and + the domain does not have a prior defined. """ if current_path not in self._predefined_samplings: sampled_value = domain_obj.sample() @@ -113,12 +142,15 @@ def __call__( class PriorOrFallbackSampler(DomainSampler): """A sampler that uses a prior value if available, otherwise falls back to another sampler. - :param fallback_sampler: A DomainSampler to use if the prior is not available. - :param prior_use_probability: The probability of using the prior value when - available. - This should be a float between 0 and 1, where 0 means never use the prior and 1 means - always use it. - :raises ValueError: If the prior_use_probability is not between 0 and 1. 
+ + Args: + fallback_sampler: A DomainSampler to use if the prior is not available. + prior_use_probability: The probability of using the prior value when + available. This should be a float between 0 and 1, where 0 means never use + the prior and 1 means always use it. + + Raises: + ValueError: If the prior_use_probability is not between 0 and 1. """ def __init__( @@ -127,12 +159,15 @@ def __init__( prior_use_probability: float, ): """Initialize the sampler with a fallback sampler and a prior use probability. - :param fallback_sampler: A DomainSampler to use if the prior is not available. - :param prior_use_probability: The probability of using the prior value when - available. - This should be a float between 0 and 1, where 0 means never use the prior and 1 - means always use it. - :raises ValueError: If the prior_use_probability is not between 0 and 1. + + Args: + fallback_sampler: A DomainSampler to use if the prior is not available. + prior_use_probability: The probability of using the prior value when + available. This should be a float between 0 and 1, where 0 means never + use the prior and 1 means always use it. + + Raises: + ValueError: If the prior_use_probability is not between 0 and 1. """ if not 0 <= prior_use_probability <= 1: raise ValueError( @@ -151,11 +186,17 @@ def __call__( ) -> T: """Sample a value from the domain, using the prior if available and according to the prior use probability. - :param domain_obj: The domain object from which to sample. - :param current_path: The path for which to sample a value. - :return: A sampled value, either from the prior or from the fallback sampler. - :raises ValueError: If the domain does not have a prior defined and the fallback - sampler is not provided. + + Args: + domain_obj: The domain object from which to sample. + current_path: The path for which to sample a value. + + Returns: + A sampled value, either from the prior or from the fallback sampler. 
+ + Raises: + ValueError: If the domain does not have a prior defined and the fallback + sampler is not provided. """ use_prior = random.choices( (True, False), @@ -191,12 +232,16 @@ class MutateByForgettingSampler(DomainSampler): """A sampler that mutates predefined samplings by forgetting a certain number of them. It randomly selects a number of predefined samplings to forget and returns a new sampler that only uses the remaining samplings. - :param predefined_samplings: A mapping of paths to predefined values. - :param n_forgets: The number of predefined samplings to forget. - This should be an integer greater than 0 and less than or equal to the number of - predefined samplings. - :raises ValueError: If n_forgets is not a valid integer or if it exceeds the number - of predefined samplings. + + Args: + predefined_samplings: A mapping of paths to predefined values. + n_forgets: The number of predefined samplings to forget. + This should be an integer greater than 0 and less than or equal to the + number of predefined samplings. + + Raises: + ValueError: If n_forgets is not a valid integer or if it exceeds the number + of predefined samplings. """ def __init__( @@ -205,12 +250,16 @@ def __init__( n_forgets: int, ): """Initialize the sampler with predefined samplings and a number of forgets. - :param predefined_samplings: A mapping of paths to predefined values. - :param n_forgets: The number of predefined samplings to forget. - This should be an integer greater than 0 and less than or equal to the number of - predefined samplings. - :raises ValueError: If n_forgets is not a valid integer or if it exceeds the - number of predefined samplings. + + Args: + predefined_samplings: A mapping of paths to predefined values. + n_forgets: The number of predefined samplings to forget. + This should be an integer greater than 0 and less than or equal to the + number of predefined samplings. 
+ + Raises: + ValueError: If n_forgets is not a valid integer or if it exceeds the + number of predefined samplings. """ if ( not isinstance(n_forgets, int) @@ -235,12 +284,18 @@ def __call__( current_path: str, ) -> T: """Sample a value from the mutated predefined samplings or the domain. - :param domain_obj: The domain object from which to sample. - :param current_path: The path for which to sample a value. - :return: A sampled value, either from the mutated predefined samplings or from - the domain. - :raises ValueError: If the current path is not in the mutated predefined - samplings and the domain does not have a prior defined. + + Args: + domain_obj: The domain object from which to sample. + current_path: The path for which to sample a value. + + Returns: + A sampled value, either from the mutated predefined samplings or from + the domain. + + Raises: + ValueError: If the current path is not in the mutated predefined + samplings and the domain does not have a prior defined. """ return self._random_sampler(domain_obj=domain_obj, current_path=current_path) @@ -248,12 +303,16 @@ def __call__( class MutatateUsingCentersSampler(DomainSampler): """A sampler that mutates predefined samplings by forgetting a certain number of them, but still uses the original values as centers for sampling. - :param predefined_samplings: A mapping of paths to predefined values. - :param n_mutations: The number of predefined samplings to mutate. - This should be an integer greater than 0 and less than or equal to the number of - predefined samplings. - :raises ValueError: If n_mutations is not a valid integer or if it exceeds the number - of predefined samplings. + + Args: + predefined_samplings: A mapping of paths to predefined values. + n_mutations: The number of predefined samplings to mutate. + This should be an integer greater than 0 and less than or equal to the number + of predefined samplings. 
+ + Raises: + ValueError: If n_mutations is not a valid integer or if it exceeds the number + of predefined samplings. """ def __init__( @@ -262,12 +321,16 @@ def __init__( n_mutations: int, ): """Initialize the sampler with predefined samplings and a number of mutations. - :param predefined_samplings: A mapping of paths to predefined values. - :param n_mutations: The number of predefined samplings to mutate. - This should be an integer greater than 0 and less than or equal to the number of - predefined samplings. - :raises ValueError: If n_mutations is not a valid integer or if it exceeds - the number of predefined samplings. + + Args: + predefined_samplings: A mapping of paths to predefined values. + n_mutations: The number of predefined samplings to mutate. + This should be an integer greater than 0 and less than or equal to the + number of predefined samplings. + + Raises: + ValueError: If n_mutations is not a valid integer or if it exceeds + the number of predefined samplings. """ if ( not isinstance(n_mutations, int) @@ -292,12 +355,18 @@ def __call__( ) -> T: """Sample a value from the predefined samplings or the domain, using original values as centers if the current path is not in the kept samplings. - :param domain_obj: The domain object from which to sample. - :param current_path: The path for which to sample a value. - :return: A sampled value, either from the kept samplings or from the domain, - using the original values as centers if necessary. - :raises ValueError: If the current path is not in the kept samplings and the - domain does not have a prior defined. + + Args: + domain_obj: The domain object from which to sample. + current_path: The path for which to sample a value. + + Returns: + A sampled value, either from the kept samplings or from the domain, + using the original values as centers if necessary. + + Raises: + ValueError: If the current path is not in the kept samplings and the + domain does not have a prior defined. 
""" if current_path not in self._kept_samplings_to_make: # For this path we either have forgotten the value or we never had it. @@ -349,17 +418,20 @@ def _crossover_samplings_to_make_by_mixing( class CrossoverByMixingSampler(DomainSampler): """A sampler that performs a crossover operation by mixing two sets of predefined samplings. It combines the predefined samplings from two sources, allowing for a - probability-based - selection of values from either source. - :param predefined_samplings_1: The first set of predefined samplings. - :param predefined_samplings_2: The second set of predefined samplings. - :param prefer_first_probability: The probability of preferring values from the first - set over the second set when both have values for the same path. - This should be a float between 0 and 1, where 0 means always prefer the second set - and 1 means always prefer the first set. - :raises ValueError: If prefer_first_probability is not between 0 and 1. - :raises CrossoverNotPossibleError: If no crossovers were made between the two sets - of predefined samplings. + probability-based selection of values from either source. + + Args: + predefined_samplings_1: The first set of predefined samplings. + predefined_samplings_2: The second set of predefined samplings. + prefer_first_probability: The probability of preferring values from the first + set over the second set when both have values for the same path. + This should be a float between 0 and 1, where 0 means always prefer the + second set and 1 means always prefer the first set. + + Raises: + ValueError: If prefer_first_probability is not between 0 and 1. + CrossoverNotPossibleError: If no crossovers were made between the two sets + of predefined samplings. """ def __init__( @@ -370,13 +442,17 @@ def __init__( ): """Initialize the sampler with two sets of predefined samplings and a preference probability for the first set. - :param predefined_samplings_1: The first set of predefined samplings. 
- :param predefined_samplings_2: The second set of predefined samplings. - :param prefer_first_probability: The probability of preferring values from the - first set over the second set when both have values for the same path. - This should be a float between 0 and 1, where 0 means always prefer the second - set and 1 means always prefer the first set. - :raises ValueError: If prefer_first_probability is not between 0 and 1. + + Args: + predefined_samplings_1: The first set of predefined samplings. + predefined_samplings_2: The second set of predefined samplings. + prefer_first_probability: The probability of preferring values from the + first set over the second set when both have values for the same path. + This should be a float between 0 and 1, where 0 means always prefer the + second set and 1 means always prefer the first set. + + Raises: + ValueError: If prefer_first_probability is not between 0 and 1. """ if not isinstance(prefer_first_probability, float) or not ( 0 <= prefer_first_probability <= 1 @@ -409,11 +485,17 @@ def __call__( current_path: str, ) -> T: """Sample a value from the crossed-over predefined samplings or the domain. - :param domain_obj: The domain object from which to sample. - :param current_path: The path for which to sample a value. - :return: A sampled value, either from the crossed-over predefined samplings or - from the domain. - :raises ValueError: If the current path is not in the crossed-over predefined - samplings and the domain does not have a prior defined. + + Args: + domain_obj: The domain object from which to sample. + current_path: The path for which to sample a value. + + Returns: + A sampled value, either from the crossed-over predefined samplings or + from the domain. + + Raises: + ValueError: If the current path is not in the crossed-over predefined + samplings and the domain does not have a prior defined. 
""" return self._random_sampler(domain_obj=domain_obj, current_path=current_path) diff --git a/tests/test_neps_space/test_domain__centering.py b/tests/test_neps_space/test_domain__centering.py index 90ac6719b..37850427d 100644 --- a/tests/test_neps_space/test_domain__centering.py +++ b/tests/test_neps_space/test_domain__centering.py @@ -133,7 +133,7 @@ def test_centering_categorical( ) categorical2 = Categorical( choices=tuple(range(1, 101)), - prior_index=categorical_prior_index_original, + prior=categorical_prior_index_original, prior_confidence=confidence_level, ) @@ -271,7 +271,7 @@ def test_centering_stranger_ranges_categorical( categorical2 = Categorical( choices=tuple(range(7)), - prior_index=2, + prior=2, prior_confidence=confidence_level, ) categorical2_centered = categorical2.centered_around( diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index d59f852f4..01d2cec55 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -53,7 +53,7 @@ class DemoHyperparameterSpace(Pipeline): ) categorical = Categorical( choices=(0, 1), - prior_index=0, + prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) integer1 = Integer( @@ -85,7 +85,7 @@ class DemoHyperparameterWithFidelitySpace(Pipeline): ) categorical = Categorical( choices=(0, 1), - prior_index=0, + prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) integer1 = Integer( @@ -121,7 +121,7 @@ class DemoHyperparameterComplexSpace(Pipeline): Resampled(_small_float), Resampled(_big_float), ), - prior_index=0, + prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) float2 = Categorical( @@ -130,12 +130,12 @@ class DemoHyperparameterComplexSpace(Pipeline): Resampled(_big_float), float1, ), - prior_index=0, + prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) categorical = Categorical( choices=(0, 1), - prior_index=0, + prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) integer1 = Integer( diff --git 
a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index 70993256e..b8b02c771 100644 --- a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -65,12 +65,12 @@ class ConvPipeline(Pipeline): ) conv_choices_prior_confidence: ConfidenceLevel = Categorical( choices=_conv_choices_prior_confidence_choices, - prior_index=1, + prior=1, prior_confidence=ConfidenceLevel.LOW, ) conv_choices: tuple[str, ...] = Categorical( choices=(_conv_choices_low, _conv_choices_high), - prior_index=conv_choices_prior_index, + prior=conv_choices_prior_index, prior_confidence=conv_choices_prior_confidence, ) From 8c5e9f0e2f55de9ed6ba4ffe88a0561d18402536 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 6 Jul 2025 22:04:25 +0200 Subject: [PATCH 021/156] Add documentation for NePS Spaces framework and usage examples --- docs/reference/neps_spaces.md | 158 ++++++++++++++++++++++++++++++++++ mkdocs.yml | 1 + 2 files changed, 159 insertions(+) create mode 100644 docs/reference/neps_spaces.md diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md new file mode 100644 index 000000000..abdb422ea --- /dev/null +++ b/docs/reference/neps_spaces.md @@ -0,0 +1,158 @@ +# NePS Spaces: Joint Architecture and Hyperparameter Search + +NePS Spaces provides a powerful framework for defining and optimizing complex search spaces, enabling both and joint architecture and hyperparameter search (JAHS). 
+ +## How to use NePS Spaces + +**NePS spaces** include all the necessary components to define a Hyperparameter Optimization (HPO) search space like: + +- [`Integer`][neps.space.neps_spaces.parameters.Integer]: Discrete integer values +- [`Float`][neps.space.neps_spaces.parameters.Float]: Continuous float values +- [`Categorical`][neps.space.neps_spaces.parameters.Categorical]: Discrete categorical values +- [`Fidelity`][neps.space.neps_spaces.parameters.Fidelity]: Special type for float or integer, [multi-fidelity](../reference/search_algorithms/multifidelity.md) parameters (e.g., epochs, dataset size) + +Additionally, **NePS spaces** can describe complex (hierarchical) architectures using: + +- [`Operation`][neps.space.neps_spaces.parameters.Operation]: Define operations (e.g., convolution, pooling, activation) with arguments +- [`Resampled`][neps.space.neps_spaces.parameters.Resampled]: Resample other parameters + +### Simple spaces + +A **NePS space** is defined as a child class of [`Pipeline`][neps.space.neps_spaces.parameters.Pipeline]: + +```python + +def pipeline_space(Pipeline): +``` + +We can then define the hyperparameters that make up the space, like so: + +```python + + float_param = Float(min_value=0.1, max_value=1.0) + int_param = Integer(min_value=1, max_value=10) + cat_param = Categorical(choices=["A", "B", "C"]) +``` + +### Using your knowledge, providing a Prior + +You can provide **your knowledge about where a good value for this parameter lies** by indicating a `prior=`. 
You can also specify a `prior_confidence=` to indicate how strongly you want NePS to focus on these, one of either `"low"`, `"medium"`, or `"high"`: + +```python + # Categorical parameters can also choose between other parameters + # Here the float parameter (index 0) is used as a prior + float_or_int = Categorical(choices=(float_param, int_param), prior=0, prior_confidence="high") +``` + +### Resampling and Hierarchies + +You can also resample parameters to use for other parameters, even themselves recursively, with [`Resampled`][neps.space.neps_spaces.parameters.Resampled]: + +```python + # The new parameter will have the same range but will be resampled + # independently, so it can take different values than its source + resampled_float = Resampled(source=float_param) + + # If you only use a parameter to resample from it, prefix it with an underscore + # This way, your evaluation function will not receive it as an argument + _float = Float(min_value=1, max_value=3) + resampled_float_2 = Resampled(source=_float) +``` + +??? tip "Self- and future references" + + When referencing itself or a not yet defined parameter use a string of that parameters name + + ```python + self_reference = Categorical(choices=(Resampled("self_reference"), Resampled("next_param"))) + next_param = Float(min_value=0, max_value=5) + ``` + +### Operators and Architectures + +Combining [resampling](#resampling-and-hierarchies) and [operations][neps.space.neps_spaces.parameters.Operation] allows you to define complex architectures akin to [Context-Free Grammars (CFGs)](https://en.wikipedia.org/wiki/Context-free_grammar). + +Operations can be strings or more importantly Callables, (e.g. 
pytorch objects) for which you can define the arguments as parameters: + +```python + +from torch.nn import Sequential, Conv2d, ReLU + +class NN_Space(Pipeline): + + # Define an operation for a ReLU activation + _relu = Operation(operator=ReLU) + + # Define a convolution operation with an optimizable kernel size parameter + _convolution = Operation( + operator=Conv2d, + kwargs={"kernel_size": Integer(min_value=1, max_value=10)} + # You could also define _kernel_size separately and use Resampled + ) + + _model_args = Categorical( + choices=( + # The Sequential will either get a convolution followed by a ReLU + (Resampled(_convolution), _relu,), + # Or two (different, hence the resampling) convolutions + (Resampled(_convolution), Resampled(_convolution)), + # Or just a ReLU activation + (_relu,), + ) + ) + + # Define a sequential operation, using the previously defined _model_args + # This model will be the only parameter passed to the evaluation function + model = Operation( + operator=Sequential, + args=_model_args + ) +``` + +??? warning "Tuples as choice" + + When using a tuple as one of the choices in a `Categorical`, all choices must be tuples. + +## Using NePS Spaces + +To use a NePS space, pass it as the `pipeline_space` argument to the `neps.run()` function: + +```python +import neps +neps.run( + ..., + pipeline_space=NN_Space() +) +``` + +!!! 
tip "NePS Space-compatible optimizers" + + Currently, NePS Spaces is compatible with these optimizers, which can be imported from [neps.optimizers.neps_algorithms][neps.optimizers.neps_algorithms--neps-algorithms]: + + - [`Random Search`][neps.optimizers.neps_algorithms.neps_random_search], which can sample the space uniformly at random + - [`Complex Random Search`][neps.optimizers.neps_algorithms.neps_complex_random_search], which can sample the space uniformly at random, using priors and mutating previously sampled configurations + - [`PriorBand`][neps.optimizers.neps_algorithms.neps_priorband], which uses [multi-fidelity](./search_algorithms/multifidelity.md) and the prior knowledge encoded in the NePS space + +## Inspecting Configurations + +NePS saves the configurations as paths, where each sampling decision is recorded. As they are hard to read, so you can load the configuration from the `results` directory using the [`NepsCompatConverter`][neps.space.neps_spaces.neps_space.NepsCompatConverter] class, which converts the configuration such that it can be used with the NePS Spaces API: + +```python +from neps.space.neps_spaces import neps_space +import yaml + +with open("Path/to/config.yaml", "r") as f: + conf_dict = yaml.safe_load(f) +resolution_context = NepsCompatConverter.from_neps_config(conf_dict) + +# Use the resolution context to sample the configuration using a +# Sampler that follows the instructions in the configuration +resolved_pipeline, resolution_context = neps_space.resolve(pipeline=NN_Space(), + # Predefined samplings are the decisions made at each sampling step + domain_sampler=neps_space.OnlyPredefinedValuesSampler(predefined_samplings=config.predefined_samplings), + # Environment values are the fidelities and any arguments of the evaluation function not part of the search space + environment_values=config.environment_values) + +# The resolved_pipeline now contains all the parameters and their values, e.g. 
the Callable model +model_callable = neps_space.convert_operation_to_callable(operation=resolved_pipeline.model) +``` diff --git a/mkdocs.yml b/mkdocs.yml index f10fe23b8..efb481a56 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -140,6 +140,7 @@ nav: - Reference: - Run: 'reference/neps_run.md' - Search Space: 'reference/pipeline_space.md' + - NePS-Spaces: 'reference/neps_spaces.md' - The Evaluate Function: 'reference/evaluate_pipeline.md' - Analysing Runs: 'reference/analyse.md' - Optimizer: 'reference/optimizers.md' From a8f641a1b4ec18a43a277b4e84cb9dd3e1fdd837 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 6 Jul 2025 22:05:14 +0200 Subject: [PATCH 022/156] Fix indentation in docstring for clarity in SamplingResolutionContext and convert_confidence_level function --- neps/space/neps_spaces/neps_space.py | 2 +- neps/space/neps_spaces/parameters.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index a8eaed5c0..aa3c57bd6 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -48,7 +48,7 @@ class SamplingResolutionContext: Raises: ValueError: If the resolution_root is not a Resolvable, or if the domain_sampler - is not a DomainSampler, or if the environment_values is not a Mapping. + is not a DomainSampler, or if the environment_values is not a Mapping. """ def __init__( diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index d0bbfd3f9..9abc5edc5 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -199,14 +199,14 @@ def convert_confidence_level(confidence: str) -> ConfidenceLevel: Args: confidence: A string representing the confidence level, e.g., "low", "medium", - "high". + "high". Returns: ConfidenceLevel: The corresponding ConfidenceLevel enum value. 
Raises: ValueError: If the input string does not match any of the defined confidence - levels. + levels. """ try: return ConfidenceLevel[confidence.upper()] From 2243da7bc5c129942725394b67b90a9a94be97c5 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 6 Jul 2025 22:24:25 +0200 Subject: [PATCH 023/156] Improve documentation clarity in NePS Spaces by refining section titles and enhancing links to related topics --- docs/reference/neps_spaces.md | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index abdb422ea..9cdf11685 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -2,21 +2,21 @@ NePS Spaces provides a powerful framework for defining and optimizing complex search spaces, enabling both and joint architecture and hyperparameter search (JAHS). -## How to use NePS Spaces +## Constructing NePS Spaces -**NePS spaces** include all the necessary components to define a Hyperparameter Optimization (HPO) search space like: +**NePS spaces** include all the necessary components to define a [Hyperparameter Optimization (HPO) search space](#hpo-search-spaces) like: - [`Integer`][neps.space.neps_spaces.parameters.Integer]: Discrete integer values - [`Float`][neps.space.neps_spaces.parameters.Float]: Continuous float values - [`Categorical`][neps.space.neps_spaces.parameters.Categorical]: Discrete categorical values - [`Fidelity`][neps.space.neps_spaces.parameters.Fidelity]: Special type for float or integer, [multi-fidelity](../reference/search_algorithms/multifidelity.md) parameters (e.g., epochs, dataset size) -Additionally, **NePS spaces** can describe complex (hierarchical) architectures using: +Additionally, **NePS spaces** can describe [complex (hierarchical) architectures](#hierarchies-and-architectures) using: - [`Operation`][neps.space.neps_spaces.parameters.Operation]: Define operations (e.g., convolution, pooling, activation) with 
arguments - [`Resampled`][neps.space.neps_spaces.parameters.Resampled]: Resample other parameters -### Simple spaces +### HPO Search Spaces A **NePS space** is defined as a child class of [`Pipeline`][neps.space.neps_spaces.parameters.Pipeline]: @@ -44,16 +44,18 @@ You can provide **your knowledge about where a good value for this parameter lie float_or_int = Categorical(choices=(float_param, int_param), prior=0, prior_confidence="high") ``` -### Resampling and Hierarchies +### Hierarchies and Architectures -You can also resample parameters to use for other parameters, even themselves recursively, with [`Resampled`][neps.space.neps_spaces.parameters.Resampled]: +[Resampling][neps.space.neps_spaces.parameters.Resampled] and [operations][neps.space.neps_spaces.parameters.Operation] allow you to define complex architectures akin to [Context-Free Grammars (CFGs)](https://en.wikipedia.org/wiki/Context-free_grammar). + +With `Resampled` you can reuse parameters in for other parameters, even themselves recursively: ```python # The new parameter will have the same range but will be resampled # independently, so it can take different values than its source resampled_float = Resampled(source=float_param) - # If you only use a parameter to resample from it, prefix it with an underscore + # If you only use a parameter to resample from it later, prefix it with an underscore # This way, your evaluation function will not receive it as an argument _float = Float(min_value=1, max_value=3) resampled_float_2 = Resampled(source=_float) @@ -61,18 +63,14 @@ You can also resample parameters to use for other parameters, even themselves re ??? 
tip "Self- and future references" - When referencing itself or a not yet defined parameter use a string of that parameters name + When referencing itself or a not yet defined parameter use a string of that parameters name: ```python self_reference = Categorical(choices=(Resampled("self_reference"), Resampled("next_param"))) next_param = Float(min_value=0, max_value=5) ``` -### Operators and Architectures - -Combining [resampling](#resampling-and-hierarchies) and [operations][neps.space.neps_spaces.parameters.Operation] allows you to define complex architectures akin to [Context-Free Grammars (CFGs)](https://en.wikipedia.org/wiki/Context-free_grammar). - -Operations can be strings or more importantly Callables, (e.g. pytorch objects) for which you can define the arguments as parameters: +Operations can be Callables, (e.g. pytorch objects) whose arguments can themselves be parameters: ```python From b6a7b304ee09b976319d895616b688058d90ee70 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 7 Jul 2025 20:18:54 +0200 Subject: [PATCH 024/156] Refine documentation in NePS Spaces by correcting terminology, enhancing clarity, and improving formatting in code examples --- docs/reference/neps_spaces.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 9cdf11685..58da052ea 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -18,14 +18,14 @@ Additionally, **NePS spaces** can describe [complex (hierarchical) architectures ### HPO Search Spaces -A **NePS space** is defined as a child class of [`Pipeline`][neps.space.neps_spaces.parameters.Pipeline]: +A **NePS space** is defined as a subclass of [`Pipeline`][neps.space.neps_spaces.parameters.Pipeline]: ```python -def pipeline_space(Pipeline): +class pipeline_space(Pipeline): ``` -We can then define the hyperparameters that make up the space, like so: +Here we define the hyperparameters 
that make up the space, like so: ```python @@ -34,15 +34,15 @@ We can then define the hyperparameters that make up the space, like so: cat_param = Categorical(choices=["A", "B", "C"]) ``` -### Using your knowledge, providing a Prior +!!! tip "**Using your knowledge, providing a Prior**" -You can provide **your knowledge about where a good value for this parameter lies** by indicating a `prior=`. You can also specify a `prior_confidence=` to indicate how strongly you want NePS to focus on these, one of either `"low"`, `"medium"`, or `"high"`: + You can provide **your knowledge about where a good value for this parameter lies** by indicating a `prior=`. You can also specify a `prior_confidence=` to indicate how strongly you want NePS to focus on these, one of either `"low"`, `"medium"`, or `"high"`: -```python - # Categorical parameters can also choose between other parameters - # Here the float parameter (index 0) is used as a prior - float_or_int = Categorical(choices=(float_param, int_param), prior=0, prior_confidence="high") -``` + ```python + # Categorical parameters can also choose between other parameters + # Here the float parameter (index 0) is used as a prior + float_or_int = Categorical(choices=(float_param, int_param), prior=0, prior_confidence="high") + ``` ### Hierarchies and Architectures @@ -61,7 +61,7 @@ With `Resampled` you can reuse parameters in for other parameters, even themselv resampled_float_2 = Resampled(source=_float) ``` -??? tip "Self- and future references" +??? info "Self- and future references" When referencing itself or a not yet defined parameter use a string of that parameters name: @@ -109,7 +109,7 @@ class NN_Space(Pipeline): ??? warning "Tuples as choice" - When using a tuple as one of the choices in a `Categorical`, all choices must be tuples. + When using a tuple as one of the choices in a `Categorical`, all choices must be tuples, as in the example above with ```(_relu,)```. 
## Using NePS Spaces @@ -123,7 +123,7 @@ neps.run( ) ``` -!!! tip "NePS Space-compatible optimizers" +!!! abstract "NePS Space-compatible optimizers" Currently, NePS Spaces is compatible with these optimizers, which can be imported from [neps.optimizers.neps_algorithms][neps.optimizers.neps_algorithms--neps-algorithms]: From 0a5a7e13b869327aae2ef7f16d64a5d566937c49 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 7 Jul 2025 22:19:42 +0200 Subject: [PATCH 025/156] Add neps_algorithms as direct sub-import of neps --- docs/reference/neps_spaces.md | 2 +- neps/__init__.py | 3 +- neps/optimizers/neps_algorithms.py | 2 - .../neps_spaces/pytorch_nn_example.ipynb | 261 +++++++----------- 4 files changed, 109 insertions(+), 159 deletions(-) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 58da052ea..92724d7f5 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -125,7 +125,7 @@ neps.run( !!! abstract "NePS Space-compatible optimizers" - Currently, NePS Spaces is compatible with these optimizers, which can be imported from [neps.optimizers.neps_algorithms][neps.optimizers.neps_algorithms--neps-algorithms]: + Currently, NePS Spaces is compatible with these optimizers, which can be imported from [neps.neps_algorithms][neps.optimizers.neps_algorithms--neps-algorithms]: - [`Random Search`][neps.optimizers.neps_algorithms.neps_random_search], which can sample the space uniformly at random - [`Complex Random Search`][neps.optimizers.neps_algorithms.neps_complex_random_search], which can sample the space uniformly at random, using priors and mutating previously sampled configurations diff --git a/neps/__init__.py b/neps/__init__.py index 3554291c9..754f7d975 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -6,7 +6,7 @@ """ from neps.api import run -from neps.optimizers import algorithms +from neps.optimizers import algorithms, neps_algorithms from neps.optimizers.ask_and_tell import AskAndTell from 
neps.optimizers.optimizer import SampledConfig from neps.plot.plot import plot @@ -28,6 +28,7 @@ "Trial", "algorithms", "load_yamls", + "neps_algorithms", "plot", "run", "status", diff --git a/neps/optimizers/neps_algorithms.py b/neps/optimizers/neps_algorithms.py index 3a5cf65eb..1fd4ba120 100644 --- a/neps/optimizers/neps_algorithms.py +++ b/neps/optimizers/neps_algorithms.py @@ -10,8 +10,6 @@ from functools import partial from typing import TYPE_CHECKING, Any, Literal -import pandas as pd - from neps.optimizers.neps_bracket_optimizer import _NePSBracketOptimizer from neps.optimizers.neps_priorband import NePSPriorBandSampler from neps.optimizers.neps_random_search import NePSComplexRandomSearch, NePSRandomSearch diff --git a/neps_examples/neps_spaces/pytorch_nn_example.ipynb b/neps_examples/neps_spaces/pytorch_nn_example.ipynb index 73e63d7a2..eaf91acc3 100644 --- a/neps_examples/neps_spaces/pytorch_nn_example.ipynb +++ b/neps_examples/neps_spaces/pytorch_nn_example.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 5, "id": "f3ca063f", "metadata": {}, "outputs": [], @@ -45,7 +45,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 6, "id": "4bda71ce", "metadata": {}, "outputs": [], @@ -102,7 +102,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "17005669", "metadata": {}, "outputs": [ @@ -113,21 +113,19 @@ "Callable:\n", "\n", "Sequential(\n", - " (0): ReLUConvBN(\n", - " (op): Sequential(\n", - " (0): ReLU()\n", - " (1): LazyConv2d(0, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(2, 2), bias=False)\n", - " (2): LazyBatchNorm2d(0, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", + " (0): Conv2d(3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (1): Identity()\n", + " (2): Conv2d(3, 3, kernel_size=(1, 1), stride=(1, 1))\n", ")\n", "\n", "\n", "Config string:\n", "\n", - "( ( 
{'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}))\n", + "( ( {'in_channels': 3, 'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}) () ())\n", "\t01 :: \n", - "\t\t02 :: {'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}\n" + "\t\t02 :: {'in_channels': 3, 'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}\n", + "\t\t02 :: \n", + "\t\t02 :: \n" ] } ], @@ -153,7 +151,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "id": "9efeb556", "metadata": {}, "outputs": [], @@ -170,7 +168,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "id": "fa9cabbf", "metadata": {}, "outputs": [ @@ -182,11 +180,11 @@ "\n", " success: 5\n", "\n", - "# Best Found (config 1):\n", + "# Best Found (config 2):\n", "\n", - " objective_to_minimize: -23793.197265625\n", - " config: {'SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[7].resampled_categorical::categorical__3': 0}\n", - " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spaces\\results\\neps_spaces_nn_example\\configs\\config_1\n" + " objective_to_minimize: -1446.885986328125\n", + " config: {'SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 3, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 
2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[3].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[7].resampled_categorical::categorical__3': 1}\n", + " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spaces\\results\\neps_spaces_nn_example\\configs\\config_2\n" ] }, { @@ -194,53 +192,69 @@ "text/plain": [ "( config.SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 \\\n", " id \n", - " 1 5 \n", - " 2 0 \n", - " 3 5 \n", + " 1 3 \n", + " 2 5 \n", + " 3 4 \n", " 4 3 \n", " 5 3 \n", " \n", " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", " id \n", + " 1 4 \n", + " 2 1 \n", + " 3 \n", + " 4 2 \n", + " 5 1 \n", + " \n", + " 
config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 0 \n", + " 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 2 \n", + " 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[2].resampled_categorical::categorical__3 \\\n", + " id \n", + " 1 0 \n", + " 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", + " \n", + " config.SAMPLING__Resolvable.model.args[1].resampled_operation.args.resampled_categorical::categorical__4 \\\n", + " id \n", " 1 3 \n", " 2 \n", - " 3 1 \n", - " 4 1 \n", - " 5 1 \n", + " 3 \n", + " 4 2 \n", + " 5 0 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " config.SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", " id \n", - " 1 3 \n", - " 2 \n", + " 1 0 \n", + " 2 2 \n", " 3 \n", " 4 \n", " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " config.SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \\\n", " id \n", - " 1 4 \n", + " 1 2 \n", " 2 \n", " 3 \n", " 4 \n", " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 1 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " 
config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 2 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3 \\\n", + " config.SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", " id \n", " 1 2 \n", " 2 \n", @@ -248,37 +262,21 @@ " 4 \n", " 5 \n", " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \\\n", - " id \n", - " 1 0 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 1 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \\\n", - " id \n", - " 1 1 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", + " config.SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \\\n", + " id \n", + " 1 5 \n", + " 2 \n", + " 3 \n", + " 4 \n", + " 5 \n", " \n", " ... reported_as evaluation_duration \\\n", " id ... \n", - " 1 ... success 0.009104 \n", - " 2 ... success 0.006249 \n", - " 3 ... success 0.005574 \n", - " 4 ... success 0.005795 \n", - " 5 ... success 0.00329 \n", + " 1 ... success 0.054059 \n", + " 2 ... success 0.054146 \n", + " 3 ... success 0.000809 \n", + " 4 ... 
success 0.012381 \n", + " 5 ... success 0.002975 \n", " \n", " location state \\\n", " id \n", @@ -288,98 +286,51 @@ " 4 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", " 5 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", " \n", - " sampling_worker_id time_sampled \\\n", - " id \n", - " 1 7648-2025-07-04T23:22:27.070755+00:00 1751671347.079726 \n", - " 2 7648-2025-07-04T23:22:27.070755+00:00 1751671347.110561 \n", - " 3 7648-2025-07-04T23:22:27.070755+00:00 1751671347.136625 \n", - " 4 7648-2025-07-04T23:22:27.070755+00:00 1751671347.163217 \n", - " 5 7648-2025-07-04T23:22:27.070755+00:00 1751671347.188885 \n", + " sampling_worker_id time_sampled \\\n", + " id \n", + " 1 27288-2025-07-07T20:19:02.309700+00:00 1751919542.338901 \n", + " 2 27288-2025-07-07T20:19:02.309700+00:00 1751919542.423545 \n", + " 3 27288-2025-07-07T20:19:02.309700+00:00 1751919542.504156 \n", + " 4 27288-2025-07-07T20:19:02.309700+00:00 1751919542.529779 \n", + " 5 27288-2025-07-07T20:19:02.309700+00:00 1751919542.566814 \n", " \n", - " evaluating_worker_id evaluation_duration \\\n", - " id \n", - " 1 7648-2025-07-04T23:22:27.070755+00:00 0.009104 \n", - " 2 7648-2025-07-04T23:22:27.070755+00:00 0.006249 \n", - " 3 7648-2025-07-04T23:22:27.070755+00:00 0.005574 \n", - " 4 7648-2025-07-04T23:22:27.070755+00:00 0.005795 \n", - " 5 7648-2025-07-04T23:22:27.070755+00:00 0.00329 \n", + " evaluating_worker_id evaluation_duration \\\n", + " id \n", + " 1 27288-2025-07-07T20:19:02.309700+00:00 0.054059 \n", + " 2 27288-2025-07-07T20:19:02.309700+00:00 0.054146 \n", + " 3 27288-2025-07-07T20:19:02.309700+00:00 0.000809 \n", + " 4 27288-2025-07-07T20:19:02.309700+00:00 0.012381 \n", + " 5 27288-2025-07-07T20:19:02.309700+00:00 0.002975 \n", " \n", " time_started time_end \n", " id \n", - " 1 1751671347.080621 1751671347.093368 \n", - " 2 1751671347.11144 1751671347.121197 \n", - " 3 1751671347.137631 1751671347.146662 \n", - " 4 1751671347.164088 
1751671347.173416 \n", - " 5 1751671347.189788 1751671347.196588 \n", + " 1 1751919542.340261 1751919542.399379 \n", + " 2 1751919542.424551 1751919542.483701 \n", + " 3 1751919542.505031 1751919542.509259 \n", + " 4 1751919542.530671 1751919542.54665 \n", + " 5 1751919542.567737 1751919542.574195 \n", " \n", - " [5 rows x 66 columns],\n", - " num_success 5.0\n", - " best_objective_to_minimize -23793.197266\n", - " best_config_id 1\n", - " SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 5\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6 3\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 3\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 4\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 1\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3 2\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3 2\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 0\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 1\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 1\n", - " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 2\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 4\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 2\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3 1\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 0\n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args.resampled_categorical::categorical__6 1\n", - " SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3 2\n", - " SAMPLING__Resolvable.model.args[3].resampled_categorical::categorical__3 1\n", - " SAMPLING__Resolvable.model.args[4].resampled_categorical::categorical__3 2\n", - " SAMPLING__Resolvable.model.args[5].resampled_categorical::categorical__3 0\n", - " SAMPLING__Resolvable.model.args[6].resampled_categorical::categorical__3 0\n", - " SAMPLING__Resolvable.model.args[7].resampled_categorical::categorical__3 0\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " 
SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " 
SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 \n", - " dtype: object)" + " [5 rows x 254 columns],\n", + " num_success 5.0\n", + " best_objective_to_minimize -1446.885986\n", + " best_config_id 2\n", + " SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 5\n", + " SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6 1\n", + " ... 
\n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3 \n", + " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 \n", + " Length: 245, dtype: object)" ] }, - "execution_count": 7, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from neps.optimizers.neps_algorithms import neps_random_search\n", "import neps\n", "\n", "pipeline_space = NN_Space()\n", @@ -387,7 +338,7 @@ "neps.run(\n", " evaluate_pipeline=evaluate_pipeline,\n", " pipeline_space=pipeline_space,\n", - " optimizer=neps_random_search,\n", + " optimizer=neps.neps_algorithms.neps_random_search,\n", " root_directory=\"results/neps_spaces_nn_example\",\n", " post_run_summary=True,\n", " max_evaluations_total=5,\n", From 5aca1b53e19989d66278879ae679a00eef976bc6 Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 9 Jul 2025 15:05:58 +0200 Subject: [PATCH 026/156] Refactor 
hyperparameter space definitions and update tests to use new HPO classes - Updated the image segmentation example to utilize HPO_Float and HPO_Integer for hyperparameter definitions. - Refactored test cases to replace old parameter classes with HPO variants, ensuring compatibility with the new hyperparameter optimization framework. - Added log sampling for NePS-Integer - Adjusted imports in test files to reflect changes in the neps library structure. - Enhanced the configuration encoder tests to validate the new HPO parameter types. - Ensured all relevant tests are passing with the updated hyperparameter definitions. --- docs/reference/neps_spaces.md | 2 + neps/__init__.py | 25 +- neps/api.py | 56 ++- neps/optimizers/__init__.py | 11 +- neps/optimizers/algorithms.py | 389 ++++++++++++++++-- neps/optimizers/bayesian_optimization.py | 14 +- neps/optimizers/bracket_optimizer.py | 16 +- neps/optimizers/ifbo.py | 25 +- neps/optimizers/models/ftpfn.py | 18 +- neps/optimizers/neps_algorithms.py | 192 --------- neps/optimizers/neps_priorband.py | 5 +- neps/optimizers/random_search.py | 13 +- neps/optimizers/utils/grid.py | 15 +- neps/sampling/priors.py | 10 +- neps/space/__init__.py | 16 +- neps/space/encoding.py | 6 +- neps/space/neps_spaces/neps_space.py | 87 ++++ neps/space/neps_spaces/parameters.py | 13 +- neps/space/parameters.py | 32 +- neps/space/parsing.py | 56 +-- neps/space/search_space.py | 36 +- neps/status/status.py | 61 ++- neps_examples/basic_usage/hyperparameters.py | 16 +- .../convenience/logging_additional_info.py | 16 +- .../convenience/neps_tblogger_tutorial.py | 25 +- .../convenience/running_on_slurm_scripts.py | 10 +- .../working_directory_per_pipeline.py | 12 +- .../expert_priors_for_hyperparameters.py | 48 ++- neps_examples/efficiency/multi_fidelity.py | 15 +- .../multi_fidelity_and_expert_priors.py | 37 +- .../efficiency/pytorch_lightning_ddp.py | 41 +- .../efficiency/pytorch_lightning_fsdp.py | 22 +- .../efficiency/pytorch_native_ddp.py | 44 +- 
.../efficiency/pytorch_native_fsdp.py | 98 ++--- neps_examples/experimental/freeze_thaw.py | 30 +- .../neps_spaces/pytorch_nn_example.ipynb | 230 ++++------- .../real_world/image_segmentation_hpo.py | 103 +++-- tests/test_config_encoder.py | 14 +- .../test_neps_space/test_neps_integration.py | 18 +- ...st_neps_integration_priorband__max_cost.py | 53 +-- ...t_neps_integration_priorband__max_evals.py | 53 +-- .../test_default_report_values.py | 8 +- .../test_error_handling_strategies.py | 8 +- tests/test_runtime/test_stopping_criterion.py | 18 +- tests/test_search_space.py | 26 +- tests/test_search_space_parsing.py | 35 +- tests/test_state/test_neps_state.py | 160 ++++--- 47 files changed, 1279 insertions(+), 959 deletions(-) delete mode 100644 neps/optimizers/neps_algorithms.py diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 92724d7f5..3f795dc02 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -21,6 +21,7 @@ Additionally, **NePS spaces** can describe [complex (hierarchical) architectures A **NePS space** is defined as a subclass of [`Pipeline`][neps.space.neps_spaces.parameters.Pipeline]: ```python +from neps.space.neps_spaces.parameters import Pipeline, Float, Integer, Categorical, Fidelity, Resampled, Operation class pipeline_space(Pipeline): ``` @@ -32,6 +33,7 @@ Here we define the hyperparameters that make up the space, like so: float_param = Float(min_value=0.1, max_value=1.0) int_param = Integer(min_value=1, max_value=10) cat_param = Categorical(choices=["A", "B", "C"]) + epochs = Fidelity(Integer(1, 16)) ``` !!! 
tip "**Using your knowledge, providing a Prior**" diff --git a/neps/__init__.py b/neps/__init__.py index 754f7d975..aa1883d36 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -6,12 +6,22 @@ """ from neps.api import run -from neps.optimizers import algorithms, neps_algorithms +from neps.optimizers import algorithms from neps.optimizers.ask_and_tell import AskAndTell from neps.optimizers.optimizer import SampledConfig from neps.plot.plot import plot from neps.plot.tensorboard_eval import tblogger -from neps.space import Categorical, Constant, Float, Integer, SearchSpace +from neps.space import HPOCategorical, HPOConstant, HPOFloat, HPOInteger, SearchSpace +from neps.space.neps_spaces.parameters import ( + Categorical, + ConfidenceLevel, + Fidelity, + Float, + Integer, + Operation, + Pipeline, + Resampled, +) from neps.state import BudgetInfo, Trial from neps.status.status import status from neps.utils.files import load_and_merge_yamls as load_yamls @@ -20,15 +30,22 @@ "AskAndTell", "BudgetInfo", "Categorical", - "Constant", + "ConfidenceLevel", + "Fidelity", "Float", + "HPOCategorical", + "HPOConstant", + "HPOFloat", + "HPOInteger", "Integer", + "Operation", + "Pipeline", + "Resampled", "SampledConfig", "SearchSpace", "Trial", "algorithms", "load_yamls", - "neps_algorithms", "plot", "run", "status", diff --git a/neps/api.py b/neps/api.py index 6017ab950..4e3b84ed0 100644 --- a/neps/api.py +++ b/neps/api.py @@ -5,12 +5,18 @@ import logging import warnings from collections.abc import Callable, Mapping +from functools import partial from pathlib import Path from typing import TYPE_CHECKING, Any, Concatenate, Literal +import neps +import neps.optimizers.algorithms from neps.optimizers import AskFunction, OptimizerChoice, load_optimizer from neps.runtime import _launch_runtime -from neps.space.neps_spaces.neps_space import adjust_evaluation_pipeline_for_neps_space +from neps.space.neps_spaces.neps_space import ( + adjust_evaluation_pipeline_for_neps_space, + 
convert_neps_to_classic_search_space, +) from neps.space.neps_spaces.parameters import Pipeline from neps.space.parsing import convert_to_space from neps.status.status import post_run_csv @@ -20,20 +26,15 @@ from ConfigSpace import ConfigurationSpace from neps.optimizers.algorithms import CustomOptimizer - from neps.space import Parameter, SearchSpace + from neps.space import SearchSpace from neps.state import EvaluatePipelineReturn logger = logging.getLogger(__name__) -def run( # noqa: PLR0913 +def run( # noqa: PLR0913, C901 evaluate_pipeline: Callable[..., EvaluatePipelineReturn] | str, - pipeline_space: ( - Mapping[str, dict | str | int | float | Parameter] - | SearchSpace - | ConfigurationSpace - | Pipeline - ), + pipeline_space: ConfigurationSpace | Pipeline, *, root_directory: str | Path = "neps_results", overwrite_working_directory: bool = False, @@ -417,6 +418,43 @@ def __call__( logger.info(f"Starting neps.run using root directory {root_directory}") + # Check if the pipeline_space only contains basic HPO parameters. + # If yes, we convert it to a classic SearchSpace, to use with the old optimizers. + # If no, we use adjust_evaluation_pipeline_for_neps_space to convert the + # pipeline_space and only use the new NEPS optimizers. 
+ + # If the optimizer is not a NEPS algorithm, we try to convert the pipeline_space + inner_optimizer = None + if isinstance(optimizer, partial): + inner_optimizer = optimizer.func + while isinstance(inner_optimizer, partial): + inner_optimizer = inner_optimizer.func + if ( + optimizer + not in ( + neps.optimizers.algorithms.neps_random_search, + neps.optimizers.algorithms.neps_priorband, + neps.optimizers.algorithms.neps_complex_random_search, + ) + and ( + not inner_optimizer + or inner_optimizer + not in ( + neps.optimizers.algorithms.neps_random_search, + neps.optimizers.algorithms.neps_priorband, + neps.optimizers.algorithms.neps_complex_random_search, + ) + ) + and optimizer != "auto" + ): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space: + logger.info( + "The provided pipeline_space only contains basic HPO parameters, " + "converting it to a classic SearchSpace." + ) + pipeline_space = converted_space + if isinstance(pipeline_space, Pipeline): assert not isinstance(evaluate_pipeline, str) evaluate_pipeline = adjust_evaluation_pipeline_for_neps_space( diff --git a/neps/optimizers/__init__.py b/neps/optimizers/__init__.py index 34fb5f5bb..8cff9e3e8 100644 --- a/neps/optimizers/__init__.py +++ b/neps/optimizers/__init__.py @@ -10,16 +10,16 @@ determine_optimizer_automatically, ) from neps.optimizers.optimizer import AskFunction, OptimizerInfo -from neps.space.neps_spaces.parameters import Pipeline from neps.utils.common import extract_keyword_defaults if TYPE_CHECKING: from neps.space import SearchSpace + from neps.space.neps_spaces.parameters import Pipeline def _load_optimizer_from_string( optimizer: OptimizerChoice | Literal["auto"], - space: SearchSpace, + space: SearchSpace | Pipeline, *, optimizer_kwargs: Mapping[str, Any] | None = None, ) -> tuple[AskFunction, OptimizerInfo]: @@ -38,7 +38,7 @@ def _load_optimizer_from_string( keywords = extract_keyword_defaults(optimizer_build) optimizer_kwargs = 
optimizer_kwargs or {} - opt = optimizer_build(space, **optimizer_kwargs) + opt = optimizer_build(space, **optimizer_kwargs) # type: ignore info = OptimizerInfo(name=_optimizer, info={**keywords, **optimizer_kwargs}) return opt, info @@ -59,11 +59,6 @@ def load_optimizer( match optimizer: # Predefined string (including "auto") case str(): - if isinstance(space, Pipeline): - raise ValueError( - "String optimizers are not yet available for NePS spaces." - ) - return _load_optimizer_from_string(optimizer, space) # Predefined string with kwargs diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index a24155811..0cf751d96 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -24,7 +24,6 @@ from pathlib import Path from typing import TYPE_CHECKING, Any, Concatenate, Literal, TypeAlias -import pandas as pd import torch from neps.optimizers.ask_and_tell import AskAndTell # noqa: F401 @@ -33,24 +32,29 @@ from neps.optimizers.grid_search import GridSearch from neps.optimizers.ifbo import IFBO from neps.optimizers.models.ftpfn import FTPFNSurrogate +from neps.optimizers.neps_bracket_optimizer import _NePSBracketOptimizer +from neps.optimizers.neps_priorband import NePSPriorBandSampler +from neps.optimizers.neps_random_search import NePSComplexRandomSearch, NePSRandomSearch from neps.optimizers.optimizer import AskFunction # noqa: TC001 from neps.optimizers.priorband import PriorBandSampler from neps.optimizers.random_search import RandomSearch from neps.sampling import Prior, Sampler, Uniform from neps.space.encoding import CategoricalToUnitNorm, ConfigEncoder +from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space +from neps.space.neps_spaces.parameters import Pipeline if TYPE_CHECKING: import pandas as pd from neps.optimizers.utils.brackets import Bracket from neps.space import SearchSpace - from neps.space.neps_spaces.parameters import Pipeline + logger = logging.getLogger(__name__) -def _bo( - 
pipeline_space: SearchSpace, +def _bo( # noqa: C901, PLR0912 + pipeline_space: SearchSpace | Pipeline, *, initial_design_size: int | Literal["ndim"] = "ndim", use_priors: bool, @@ -87,6 +91,15 @@ def _bo( ValueError: if initial_design_size < 1 ValueError: if fidelity is not None and ignore_fidelity is False """ + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) if not ignore_fidelity and pipeline_space.fidelity is not None: raise ValueError( "Fidelities are not supported for BayesianOptimization. Consider setting the" @@ -135,7 +148,7 @@ def _bo( def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 - pipeline_space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], eta: int, @@ -207,6 +220,15 @@ def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 multi_objective: Whether to use multi-objective promotion strategies. Only used in case of multi-objective multi-fidelity algorithms. """ + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." 
+ ) if pipeline_space.fidelity is not None: fidelity_name, fidelity = pipeline_space.fidelity else: @@ -375,7 +397,11 @@ def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 ) -def determine_optimizer_automatically(space: SearchSpace) -> str: +def determine_optimizer_automatically(space: SearchSpace | Pipeline) -> str: + if isinstance(space, Pipeline): + if space.fidelity_attrs: + return "neps_priorband" + return "neps_complex_random_search" has_prior = any( parameter.prior is not None for parameter in space.searchables.values() ) @@ -395,7 +421,7 @@ def determine_optimizer_automatically(space: SearchSpace) -> str: def random_search( - pipeline_space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, use_priors: bool = False, ignore_fidelity: bool | Literal["highest fidelity"] = False, @@ -411,6 +437,15 @@ def random_search( ignore_fidelity: Whether to ignore fidelity when sampling. In this case, the max fidelity is always used. """ + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) assert ignore_fidelity in ( True, False, @@ -466,7 +501,7 @@ def random_search( def grid_search( - pipeline_space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, ignore_fidelity: bool = False, ) -> GridSearch: @@ -480,6 +515,16 @@ def grid_search( """ from neps.optimizers.utils.grid import make_grid + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." 
+ ) + if any( parameter.prior is not None for parameter in pipeline_space.searchables.values() ): @@ -496,7 +541,7 @@ def grid_search( def ifbo( - pipeline_space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, step_size: int | float = 1, use_priors: bool = False, @@ -546,6 +591,15 @@ def ifbo( surrogate_path: Path to the surrogate model to use surrogate_version: Version of the surrogate model to use """ + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) from neps.optimizers.ifbo import _adjust_space_to_match_stepsize if pipeline_space.fidelity is None: @@ -615,7 +669,7 @@ def ifbo( def successive_halving( - space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, sampler: Literal["uniform", "prior"] = "uniform", eta: int = 3, @@ -683,8 +737,17 @@ def successive_halving( sample_prior_first: Whether to sample the prior configuration first, and if so, should it be at the highest fidelity level. """ + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." 
+ ) return _bracket_optimizer( - pipeline_space=space, + pipeline_space=pipeline_space, bracket_type="successive_halving", eta=eta, early_stopping_rate=early_stopping_rate, @@ -697,7 +760,7 @@ def successive_halving( def hyperband( - space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, eta: int = 3, sampler: Literal["uniform", "prior"] = "uniform", @@ -747,8 +810,17 @@ def hyperband( sample_prior_first: Whether to sample the prior configuration first, and if so, should it be at the highest fidelity level. """ + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) return _bracket_optimizer( - pipeline_space=space, + pipeline_space=pipeline_space, bracket_type="hyperband", eta=eta, sampler=sampler, @@ -761,7 +833,7 @@ def hyperband( def mo_hyperband( - space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, eta: int = 3, sampler: Literal["uniform", "prior"] = "uniform", @@ -771,8 +843,17 @@ def mo_hyperband( """Multi-objective version of hyperband using the same candidate selection method as MOASHA. """ + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." 
+ ) return _bracket_optimizer( - pipeline_space=space, + pipeline_space=pipeline_space, bracket_type="hyperband", eta=eta, sampler=sampler, @@ -787,7 +868,7 @@ def mo_hyperband( def asha( - space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, eta: int = 3, early_stopping_rate: int = 0, @@ -836,9 +917,17 @@ def asha( sample_prior_first: Whether to sample the prior configuration first, and if so, should it be at the highest fidelity. """ - + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) return _bracket_optimizer( - pipeline_space=space, + pipeline_space=pipeline_space, bracket_type="asha", eta=eta, early_stopping_rate=early_stopping_rate, @@ -851,7 +940,7 @@ def asha( def moasha( - space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, eta: int = 3, early_stopping_rate: int = 0, @@ -859,8 +948,17 @@ def moasha( sample_prior_first: bool | Literal["highest_fidelity"] = False, mo_selector: Literal["nsga2", "epsnet"] = "epsnet", ) -> BracketOptimizer: + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." 
+ ) return _bracket_optimizer( - pipeline_space=space, + pipeline_space=pipeline_space, bracket_type="asha", eta=eta, early_stopping_rate=early_stopping_rate, @@ -875,7 +973,7 @@ def moasha( def async_hb( - space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, eta: int = 3, sampler: Literal["uniform", "prior"] = "uniform", @@ -921,8 +1019,17 @@ def async_hb( sample_prior_first: Whether to sample the prior configuration first. """ + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) return _bracket_optimizer( - pipeline_space=space, + pipeline_space=pipeline_space, bracket_type="async_hb", eta=eta, sampler=sampler, @@ -935,7 +1042,7 @@ def async_hb( def priorband( - space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, eta: int = 3, sample_prior_first: bool | Literal["highest_fidelity"] = False, @@ -979,13 +1086,22 @@ def priorband( `N` * `maximum_fidelity` worth of fidelity has been evaluated, proceed with bayesian optimization when sampling a new configuration. """ - if all(parameter.prior is None for parameter in space.searchables.values()): + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) + if all(parameter.prior is None for parameter in pipeline_space.searchables.values()): logger.warning( "Warning: No priors are defined in the search space, priorband will sample" " uniformly. Consider using hyperband instead." 
) return _bracket_optimizer( - pipeline_space=space, + pipeline_space=pipeline_space, bracket_type=base, eta=eta, sampler="priorband", @@ -997,7 +1113,7 @@ def priorband( def bayesian_optimization( - space: SearchSpace, + pipeline_space: SearchSpace, *, initial_design_size: int | Literal["ndim"] = "ndim", cost_aware: bool | Literal["log"] = False, @@ -1055,23 +1171,34 @@ def bayesian_optimization( optimization. If `None`, the reference point will be calculated automatically. """ + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) - if not ignore_fidelity and space.fidelity is not None: + if not ignore_fidelity and pipeline_space.fidelity is not None: raise ValueError( "Fidelities are not supported for BayesianOptimization. Consider setting the" " fidelity to a constant value or ignoring it using ignore_fidelity to" - f" always sample at max fidelity. Got fidelity: {space.fidelities} " + f" always sample at max fidelity. Got fidelity: {pipeline_space.fidelities} " ) - if ignore_fidelity and space.fidelity is None: + if ignore_fidelity and pipeline_space.fidelity is None: logger.warning( "Warning: You are using ignore_fidelity, but no fidelity is defined in the" " search space. Consider setting ignore_fidelity to False." 
) - if any(parameter.prior is not None for parameter in space.searchables.values()): + if any( + parameter.prior is not None for parameter in pipeline_space.searchables.values() + ): priors = [ parameter - for parameter in space.searchables.values() + for parameter in pipeline_space.searchables.values() if parameter.prior is not None ] raise ValueError( @@ -1080,7 +1207,7 @@ def bayesian_optimization( ) return _bo( - pipeline_space=space, + pipeline_space=pipeline_space, initial_design_size=initial_design_size, cost_aware=cost_aware, device=device, @@ -1092,7 +1219,7 @@ def bayesian_optimization( def pibo( - space: SearchSpace, + pipeline_space: SearchSpace | Pipeline, *, initial_design_size: int | Literal["ndim"] = "ndim", cost_aware: bool | Literal["log"] = False, @@ -1131,19 +1258,28 @@ def pibo( ignore_fidelity: Whether to ignore the fidelity parameter when sampling. In this case, the max fidelity is always used. """ - if all(parameter.prior is None for parameter in space.searchables.values()): + if isinstance(pipeline_space, Pipeline): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space is not None: + pipeline_space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) + if all(parameter.prior is None for parameter in pipeline_space.searchables.values()): logger.warning( "Warning: PiBO was called without any priors - using uniform priors on all" " parameters.\nConsider using Bayesian Optimization instead." ) - if ignore_fidelity and space.fidelity is None: + if ignore_fidelity and pipeline_space.fidelity is None: logger.warning( "Warning: You are using ignore_fidelity, but no fidelity is defined in the" " search space. Consider setting ignore_fidelity to False." 
) return _bo( - pipeline_space=space, + pipeline_space=pipeline_space, initial_design_size=initial_design_size, cost_aware=cost_aware, device=device, @@ -1194,9 +1330,182 @@ def custom( ) +def neps_complex_random_search( + pipeline: Pipeline, + *_args: Any, + **_kwargs: Any, +) -> NePSComplexRandomSearch: + """A complex random search algorithm that samples configurations uniformly at random, + but allows for more complex sampling strategies. + + Args: + pipeline: The search space to sample from. + """ + + return NePSComplexRandomSearch( + pipeline=pipeline, + ) + + +def neps_random_search( + pipeline: Pipeline, + *_args: Any, + **_kwargs: Any, +) -> NePSRandomSearch: + """A simple random search algorithm that samples configurations uniformly at random. + + Args: + pipeline: The search space to sample from. + """ + + return NePSRandomSearch( + pipeline=pipeline, + ) + + +def _neps_bracket_optimizer( + pipeline_space: Pipeline, + *, + bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], + eta: int, + sampler: Literal["priorband"], + sample_prior_first: bool | Literal["highest_fidelity"], + early_stopping_rate: int | None, +) -> _NePSBracketOptimizer: + fidelity_attrs = pipeline_space.fidelity_attrs + + if len(fidelity_attrs.items()) != 1: + raise ValueError( + "Only one fidelity should be defined in the pipeline space." 
+ f"\nGot: {fidelity_attrs!r}" + ) + + fidelity_name, fidelity_obj = next(iter(fidelity_attrs.items())) + + if sample_prior_first not in (True, False, "highest_fidelity"): + raise ValueError( + "sample_prior_first should be either True, False or 'highest_fidelity'" + ) + + from neps.optimizers.utils import brackets + + # Determine the strategy for creating brackets for sampling + create_brackets: Callable[[pd.DataFrame], Sequence[Bracket] | Bracket] + match bracket_type: + case "successive_halving": + assert early_stopping_rate is not None + rung_to_fidelity, rung_sizes = brackets.calculate_sh_rungs( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + early_stopping_rate=early_stopping_rate, + ) + create_brackets = partial( + brackets.Sync.create_repeating, + rung_sizes=rung_sizes, + ) + + case "hyperband": + assert early_stopping_rate is None + rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + ) + create_brackets = partial( + brackets.Hyperband.create_repeating, + bracket_layouts=bracket_layouts, + ) + + case "asha": + assert early_stopping_rate is not None + rung_to_fidelity, _rung_sizes = brackets.calculate_sh_rungs( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + early_stopping_rate=early_stopping_rate, + ) + create_brackets = partial( + brackets.Async.create, + rungs=list(rung_to_fidelity), + eta=eta, + ) + + case "async_hb": + assert early_stopping_rate is None + rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( + bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + eta=eta, + ) + # We don't care about the capacity of each bracket, we need the rung layout + bracket_rungs = [list(bracket.keys()) for bracket in bracket_layouts] + create_brackets = partial( + brackets.AsyncHyperband.create, + bracket_rungs=bracket_rungs, + eta=eta, + ) + case _: + raise ValueError(f"Unknown bracket type: 
{bracket_type}") + + _sampler: NePSPriorBandSampler + match sampler: + case "priorband": + _sampler = NePSPriorBandSampler( + space=pipeline_space, + eta=eta, + early_stopping_rate=( + early_stopping_rate if early_stopping_rate is not None else 0 + ), + fid_bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + ) + case _: + raise ValueError(f"Unknown sampler: {sampler}") + + return _NePSBracketOptimizer( + space=pipeline_space, + eta=eta, + rung_to_fid=rung_to_fidelity, + sampler=_sampler, + sample_prior_first=sample_prior_first, + create_brackets=create_brackets, + ) + + +def neps_priorband( + space: Pipeline, + *, + eta: int = 3, + sample_prior_first: bool | Literal["highest_fidelity"] = False, + base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", +) -> _NePSBracketOptimizer: + """Create a PriorBand optimizer for the given pipeline space. + + Args: + space: The pipeline space to optimize over. + eta: The eta parameter for the algorithm. + sample_prior_first: Whether to sample the prior first. + If set to `"highest_fidelity"`, the prior will be sampled at the + highest fidelity, otherwise at the lowest fidelity. + base: The type of bracket optimizer to use. One of: + - "successive_halving" + - "hyperband" + - "asha" + - "async_hb" + Returns: + An instance of _BracketOptimizer configured for PriorBand sampling. 
+ """ + return _neps_bracket_optimizer( + pipeline_space=space, + bracket_type=base, + eta=eta, + sampler="priorband", + sample_prior_first=sample_prior_first, + early_stopping_rate=0 if base in ("successive_halving", "asha") else None, + ) + + PredefinedOptimizers: Mapping[ str, - Callable[Concatenate[SearchSpace, ...], AskFunction], + Callable[Concatenate[SearchSpace, ...], AskFunction] + | Callable[Concatenate[Pipeline, ...], AskFunction] + | Callable[Concatenate[SearchSpace, Pipeline, ...], AskFunction], ] = { f.__name__: f for f in ( @@ -1212,6 +1521,9 @@ def custom( moasha, async_hb, priorband, + neps_random_search, + neps_complex_random_search, + neps_priorband, ) } @@ -1228,4 +1540,7 @@ def custom( "random_search", "grid_search", "ifbo", + "neps_random_search", + "neps_complex_random_search", + "neps_priorband", ] diff --git a/neps/optimizers/bayesian_optimization.py b/neps/optimizers/bayesian_optimization.py index 0c8f84e86..2784fcc34 100644 --- a/neps/optimizers/bayesian_optimization.py +++ b/neps/optimizers/bayesian_optimization.py @@ -22,6 +22,8 @@ ) from neps.optimizers.optimizer import SampledConfig from neps.optimizers.utils.initial_design import make_initial_design +from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space +from neps.space.neps_spaces.parameters import Pipeline if TYPE_CHECKING: from neps.sampling import Prior @@ -63,7 +65,7 @@ def _pibo_exp_term( class BayesianOptimization: """Uses `botorch` as an engine for doing bayesian optimiziation.""" - space: SearchSpace + space: SearchSpace | Pipeline """The search space to use.""" encoder: ConfigEncoder @@ -93,6 +95,16 @@ def __call__( # noqa: C901, PLR0912, PLR0915 # noqa: C901, PLR0912 budget_info: BudgetInfo | None = None, n: int | None = None, ) -> SampledConfig | list[SampledConfig]: + if isinstance(self.space, Pipeline): + converted_space = convert_neps_to_classic_search_space(self.space) + if converted_space is not None: + self.space = converted_space + 
else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) + # If fidelities exist, sample from them as normal # This is a bit of a hack, as we set them to max fidelity # afterwards, but we need the complete space to sample diff --git a/neps/optimizers/bracket_optimizer.py b/neps/optimizers/bracket_optimizer.py index baeea37ff..74dafeace 100644 --- a/neps/optimizers/bracket_optimizer.py +++ b/neps/optimizers/bracket_optimizer.py @@ -21,6 +21,8 @@ from neps.optimizers.priorband import PriorBandSampler from neps.optimizers.utils.brackets import PromoteAction, SampleAction from neps.sampling.samplers import Sampler +from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space +from neps.space.neps_spaces.parameters import Pipeline from neps.utils.common import disable_warnings if TYPE_CHECKING: @@ -212,7 +214,7 @@ class BracketOptimizer: `"successive_halving"`, `"asha"`, `"hyperband"`, etc. """ - space: SearchSpace + space: SearchSpace | Pipeline """The pipeline space to optimize over.""" encoder: ConfigEncoder @@ -251,12 +253,22 @@ class BracketOptimizer: fid_name: str """The name of the fidelity in the space.""" - def __call__( # noqa: C901, PLR0912 + def __call__( # noqa: C901, PLR0912, PLR0915 self, trials: Mapping[str, Trial], budget_info: BudgetInfo | None, n: int | None = None, ) -> SampledConfig | list[SampledConfig]: + if isinstance(self.space, Pipeline): + converted_space = convert_neps_to_classic_search_space(self.space) + if converted_space is not None: + self.space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." 
+ ) + assert n is None, "TODO" space = self.space parameters = space.searchables diff --git a/neps/optimizers/ifbo.py b/neps/optimizers/ifbo.py index 4e7d90726..13c2ed426 100755 --- a/neps/optimizers/ifbo.py +++ b/neps/optimizers/ifbo.py @@ -16,7 +16,9 @@ from neps.optimizers.optimizer import SampledConfig from neps.optimizers.utils.initial_design import make_initial_design from neps.sampling import Prior, Sampler -from neps.space import ConfigEncoder, Domain, Float, Integer, SearchSpace +from neps.space import ConfigEncoder, Domain, HPOFloat, HPOInteger, SearchSpace +from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space +from neps.space.neps_spaces.parameters import Pipeline if TYPE_CHECKING: from neps.state import BudgetInfo, Trial @@ -68,10 +70,10 @@ def _adjust_space_to_match_stepsize( r = x - n * step_size new_lower = fidelity.lower + r - new_fid: Float | Integer + new_fid: HPOFloat | HPOInteger match fidelity: - case Float(): - new_fid = Float( + case HPOFloat(): + new_fid = HPOFloat( lower=float(new_lower), upper=float(fidelity.upper), log=fidelity.log, @@ -79,8 +81,8 @@ def _adjust_space_to_match_stepsize( is_fidelity=True, prior_confidence=fidelity.prior_confidence, ) - case Integer(): - new_fid = Integer( + case HPOInteger(): + new_fid = HPOInteger( lower=int(new_lower), upper=int(fidelity.upper), log=fidelity.log, @@ -102,7 +104,7 @@ class IFBO: * Github: https://github.com/automl/ifBO/tree/main """ - space: SearchSpace + space: SearchSpace | Pipeline """The entire search space for the pipeline.""" encoder: ConfigEncoder @@ -135,6 +137,15 @@ def __call__( budget_info: BudgetInfo | None = None, n: int | None = None, ) -> SampledConfig | list[SampledConfig]: + if isinstance(self.space, Pipeline): + converted_space = convert_neps_to_classic_search_space(self.space) + if converted_space is not None: + self.space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " 
space-compatible optimizer." + ) assert self.space.fidelity is not None fidelity_name, fidelity = self.space.fidelity parameters = self.space.searchables diff --git a/neps/optimizers/models/ftpfn.py b/neps/optimizers/models/ftpfn.py index 08a443a54..b69337be8 100644 --- a/neps/optimizers/models/ftpfn.py +++ b/neps/optimizers/models/ftpfn.py @@ -10,7 +10,7 @@ from neps.sampling import Prior, Sampler if TYPE_CHECKING: - from neps.space import ConfigEncoder, Domain, Float, Integer + from neps.space import ConfigEncoder, Domain, HPOFloat, HPOInteger from neps.state.trial import Trial @@ -106,7 +106,7 @@ def _cast_tensor_shapes(x: torch.Tensor) -> torch.Tensor: def encode_ftpfn( trials: Mapping[str, Trial], - fid: tuple[str, Integer | Float], + fid: tuple[str, HPOInteger | HPOFloat], budget_domain: Domain, encoder: ConfigEncoder, *, @@ -168,12 +168,14 @@ def encode_ftpfn( # We could possibly include some bounded transform to assert this. minimize_ys = torch.tensor( [ - pending_value - if trial.report is None - else ( - error_value - if trial.report.objective_to_minimize is None - else trial.report.objective_to_minimize + ( + pending_value + if trial.report is None + else ( + error_value + if trial.report.objective_to_minimize is None + else trial.report.objective_to_minimize + ) ) for trial in trials.values() ], diff --git a/neps/optimizers/neps_algorithms.py b/neps/optimizers/neps_algorithms.py deleted file mode 100644 index 1fd4ba120..000000000 --- a/neps/optimizers/neps_algorithms.py +++ /dev/null @@ -1,192 +0,0 @@ -"""NePS Algorithms -=========== -This module provides implementations of various NePS algorithms for optimizing pipeline -spaces. 
-""" - -from __future__ import annotations - -from collections.abc import Callable, Sequence -from functools import partial -from typing import TYPE_CHECKING, Any, Literal - -from neps.optimizers.neps_bracket_optimizer import _NePSBracketOptimizer -from neps.optimizers.neps_priorband import NePSPriorBandSampler -from neps.optimizers.neps_random_search import NePSComplexRandomSearch, NePSRandomSearch - -if TYPE_CHECKING: - import pandas as pd - - from neps.optimizers.utils.brackets import Bracket - from neps.space.neps_spaces.parameters import Pipeline - - -def _neps_bracket_optimizer( - pipeline_space: Pipeline, - *, - bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], - eta: int, - sampler: Literal["priorband"], - sample_prior_first: bool | Literal["highest_fidelity"], - early_stopping_rate: int | None, -) -> _NePSBracketOptimizer: - fidelity_attrs = pipeline_space.fidelity_attrs - - if len(fidelity_attrs) != 1: - raise ValueError( - "Only one fidelity should be defined in the pipeline space." 
- f"\nGot: {fidelity_attrs!r}" - ) - - fidelity_name, fidelity_obj = next(iter(fidelity_attrs.items())) - - if sample_prior_first not in (True, False, "highest_fidelity"): - raise ValueError( - "sample_prior_first should be either True, False or 'highest_fidelity'" - ) - - from neps.optimizers.utils import brackets - - # Determine the strategy for creating brackets for sampling - create_brackets: Callable[[pd.DataFrame], Sequence[Bracket] | Bracket] - match bracket_type: - case "successive_halving": - assert early_stopping_rate is not None - rung_to_fidelity, rung_sizes = brackets.calculate_sh_rungs( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), - eta=eta, - early_stopping_rate=early_stopping_rate, - ) - create_brackets = partial( - brackets.Sync.create_repeating, - rung_sizes=rung_sizes, - ) - - case "hyperband": - assert early_stopping_rate is None - rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), - eta=eta, - ) - create_brackets = partial( - brackets.Hyperband.create_repeating, - bracket_layouts=bracket_layouts, - ) - - case "asha": - assert early_stopping_rate is not None - rung_to_fidelity, _rung_sizes = brackets.calculate_sh_rungs( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), - eta=eta, - early_stopping_rate=early_stopping_rate, - ) - create_brackets = partial( - brackets.Async.create, - rungs=list(rung_to_fidelity), - eta=eta, - ) - - case "async_hb": - assert early_stopping_rate is None - rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), - eta=eta, - ) - # We don't care about the capacity of each bracket, we need the rung layout - bracket_rungs = [list(bracket.keys()) for bracket in bracket_layouts] - create_brackets = partial( - brackets.AsyncHyperband.create, - bracket_rungs=bracket_rungs, - eta=eta, - ) - case _: - raise ValueError(f"Unknown bracket type: 
{bracket_type}") - - _sampler: NePSPriorBandSampler - match sampler: - case "priorband": - _sampler = NePSPriorBandSampler( - space=pipeline_space, - eta=eta, - early_stopping_rate=( - early_stopping_rate if early_stopping_rate is not None else 0 - ), - fid_bounds=(fidelity_obj.min_value, fidelity_obj.max_value), - ) - case _: - raise ValueError(f"Unknown sampler: {sampler}") - - return _NePSBracketOptimizer( - space=pipeline_space, - eta=eta, - rung_to_fid=rung_to_fidelity, - sampler=_sampler, - sample_prior_first=sample_prior_first, - create_brackets=create_brackets, - ) - - -def neps_priorband( - space: Pipeline, - *, - eta: int = 3, - sample_prior_first: bool | Literal["highest_fidelity"] = False, - base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", -) -> _NePSBracketOptimizer: - """Create a PriorBand optimizer for the given pipeline space. - - Args: - space: The pipeline space to optimize over. - eta: The eta parameter for the algorithm. - sample_prior_first: Whether to sample the prior first. - If set to `"highest_fidelity"`, the prior will be sampled at the - highest fidelity, otherwise at the lowest fidelity. - base: The type of bracket optimizer to use. One of: - - "successive_halving" - - "hyperband" - - "asha" - - "async_hb" - Returns: - An instance of _BracketOptimizer configured for PriorBand sampling. - """ - return _neps_bracket_optimizer( - pipeline_space=space, - bracket_type=base, - eta=eta, - sampler="priorband", - sample_prior_first=sample_prior_first, - early_stopping_rate=0 if base in ("successive_halving", "asha") else None, - ) - - -def neps_random_search( - pipeline: Pipeline, - *_args: Any, - **_kwargs: Any, -) -> NePSRandomSearch: - """A simple random search algorithm that samples configurations uniformly at random. - - Args: - pipeline: The search space to sample from. 
- """ - - return NePSRandomSearch( - pipeline=pipeline, - ) - - -def neps_complex_random_search( - pipeline: Pipeline, - *_args: Any, - **_kwargs: Any, -) -> NePSComplexRandomSearch: - """A complex random search algorithm that samples configurations uniformly at random, - but allows for more complex sampling strategies. - - Args: - pipeline: The search space to sample from. - """ - - return NePSComplexRandomSearch( - pipeline=pipeline, - ) diff --git a/neps/optimizers/neps_priorband.py b/neps/optimizers/neps_priorband.py index 5557d6831..874ecfa0f 100644 --- a/neps/optimizers/neps_priorband.py +++ b/neps/optimizers/neps_priorband.py @@ -13,7 +13,6 @@ import numpy as np -import neps.space.neps_spaces.parameters import neps.space.neps_spaces.sampling from neps.optimizers.utils import brackets from neps.space.neps_spaces import neps_space @@ -21,12 +20,14 @@ if TYPE_CHECKING: import pandas as pd + from neps.space.neps_spaces.parameters import Pipeline + @dataclass class NePSPriorBandSampler: """Implement a sampler based on PriorBand.""" - space: neps.space.neps_spaces.parameters.Pipeline + space: Pipeline """The pipeline space to optimize over.""" eta: int diff --git a/neps/optimizers/random_search.py b/neps/optimizers/random_search.py index 376baa782..6d1f0cfa0 100644 --- a/neps/optimizers/random_search.py +++ b/neps/optimizers/random_search.py @@ -5,6 +5,8 @@ from typing import TYPE_CHECKING from neps.optimizers.optimizer import SampledConfig +from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space +from neps.space.neps_spaces.parameters import Pipeline if TYPE_CHECKING: from neps.sampling import Sampler @@ -16,7 +18,7 @@ class RandomSearch: """A simple random search optimizer.""" - space: SearchSpace + space: SearchSpace | Pipeline encoder: ConfigEncoder sampler: Sampler @@ -26,6 +28,15 @@ def __call__( budget_info: BudgetInfo | None, n: int | None = None, ) -> SampledConfig | list[SampledConfig]: + if isinstance(self.space, Pipeline): + 
converted_space = convert_neps_to_classic_search_space(self.space) + if converted_space is not None: + self.space = converted_space + else: + raise ValueError( + "This optimizer only supports HPO search spaces, please use a NePS" + " space-compatible optimizer." + ) n_trials = len(trials) _n = 1 if n is None else n configs = self.sampler.sample(_n, to=self.encoder.domains) diff --git a/neps/optimizers/utils/grid.py b/neps/optimizers/utils/grid.py index 720dd7713..b6ba5ff0c 100644 --- a/neps/optimizers/utils/grid.py +++ b/neps/optimizers/utils/grid.py @@ -5,7 +5,14 @@ import torch -from neps.space import Categorical, Constant, Domain, Float, Integer, SearchSpace +from neps.space import ( + Domain, + HPOCategorical, + HPOConstant, + HPOFloat, + HPOInteger, + SearchSpace, +) def make_grid( @@ -34,11 +41,11 @@ def make_grid( param_ranges: dict[str, list[Any]] = {} for name, hp in space.items(): match hp: - case Categorical(): + case HPOCategorical(): param_ranges[name] = list(hp.choices) - case Constant(): + case HPOConstant(): param_ranges[name] = [hp.value] - case Integer() | Float(): + case HPOInteger() | HPOFloat(): if hp.is_fidelity and ignore_fidelity: param_ranges[name] = [hp.upper] continue diff --git a/neps/sampling/priors.py b/neps/sampling/priors.py index 9b5643d92..8e425f3f8 100644 --- a/neps/sampling/priors.py +++ b/neps/sampling/priors.py @@ -23,7 +23,7 @@ TruncatedNormal, ) from neps.sampling.samplers import Sampler -from neps.space import Categorical, ConfigEncoder, Domain, Float, Integer +from neps.space import ConfigEncoder, Domain, HPOCategorical, HPOFloat, HPOInteger if TYPE_CHECKING: from torch.distributions import Distribution @@ -120,7 +120,7 @@ def uniform(cls, ncols: int) -> Uniform: @classmethod def from_parameters( cls, - parameters: Mapping[str, Categorical | Float | Integer], + parameters: Mapping[str, HPOCategorical | HPOFloat | HPOInteger], *, center_values: Mapping[str, Any] | None = None, confidence_values: Mapping[str, float] | None = 
None, @@ -160,7 +160,9 @@ def from_parameters( continue confidence_score = confidence_values.get(name, _mapping[hp.prior_confidence]) - center = hp.choices.index(default) if isinstance(hp, Categorical) else default + center = ( + hp.choices.index(default) if isinstance(hp, HPOCategorical) else default + ) centers.append((center, confidence_score)) return Prior.from_domains_and_centers(domains=domains, centers=centers) @@ -356,7 +358,7 @@ def log_pdf( if x.shape[-1] != len(self.distributions): raise ValueError( - f"Got a tensor `x` whose last dimesion (the hyperparameter dimension)" + "Got a tensor `x` whose last dimesion (the hyperparameter dimension)" f" is of length {x.shape[-1]=} but" f" the CenteredPrior called has {len(self.distributions)=}" " distributions to use for calculating the `log_pdf`. Perhaps" diff --git a/neps/space/__init__.py b/neps/space/__init__.py index f2bbc55ca..477596241 100644 --- a/neps/space/__init__.py +++ b/neps/space/__init__.py @@ -1,15 +1,21 @@ from neps.space.domain import Domain from neps.space.encoding import ConfigEncoder -from neps.space.parameters import Categorical, Constant, Float, Integer, Parameter +from neps.space.parameters import ( + HPOCategorical, + HPOConstant, + HPOFloat, + HPOInteger, + Parameter, +) from neps.space.search_space import SearchSpace __all__ = [ - "Categorical", "ConfigEncoder", - "Constant", "Domain", - "Float", - "Integer", + "HPOCategorical", + "HPOConstant", + "HPOFloat", + "HPOInteger", "Parameter", "SearchSpace", ] diff --git a/neps/space/encoding.py b/neps/space/encoding.py index d58c63dc3..b0a8527f9 100644 --- a/neps/space/encoding.py +++ b/neps/space/encoding.py @@ -15,7 +15,7 @@ import torch from neps.space.domain import Domain -from neps.space.parameters import Categorical, Float, Integer, Parameter +from neps.space.parameters import HPOCategorical, HPOFloat, HPOInteger, Parameter V = TypeVar("V", int, float) @@ -486,9 +486,9 @@ def from_parameters( continue match hp: - case Float() | 
Integer(): + case HPOFloat() | HPOInteger(): transformers[name] = MinMaxNormalizer(hp.domain) # type: ignore - case Categorical(): + case HPOCategorical(): transformers[name] = CategoricalToIntegerTransformer(hp.choices) case _: raise ValueError(f"Unsupported parameter type: {type(hp)}.") diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index aa3c57bd6..7261f67ec 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -9,17 +9,21 @@ import functools from collections.abc import Callable, Generator, Mapping from typing import ( + TYPE_CHECKING, Any, TypeVar, cast, ) +import neps from neps.optimizers import optimizer from neps.space.neps_spaces import config_string from neps.space.neps_spaces.parameters import ( Categorical, Domain, Fidelity, + Float, + Integer, Operation, Pipeline, Resampled, @@ -30,6 +34,10 @@ OnlyPredefinedValuesSampler, RandomSampler, ) +from neps.space.parsing import convert_mapping + +if TYPE_CHECKING: + from neps.space import SearchSpace P = TypeVar("P", bound="Pipeline") @@ -902,3 +910,82 @@ def inner(*args: Any, **kwargs: Any) -> Any: return evaluation_pipeline(*args, **new_kwargs) return inner + + +def convert_neps_to_classic_search_space(space: Pipeline) -> SearchSpace | None: + """Convert a NePS space to a classic SearchSpace if possible. + This function checks if the NePS space can be converted to a classic SearchSpace + by ensuring that it does not contain any complex types like Operation or Resampled, + and that all choices of Categorical parameters are of basic types (int, str, float). + If the checks pass, it converts the NePS space to a classic SearchSpace. + + Args: + space: The NePS space to convert, which should be a Pipeline object. + + Returns: + A classic SearchSpace if the conversion is possible, otherwise None. 
+ """ + # First check: No parameters are of type Operation or Resampled + if not any( + isinstance(param, Operation | Resampled) for param in space.get_attrs().values() + ): + # Second check: All choices of all categoricals are of basic + # types i.e. int, str or float + categoricals = [ + param + for param in space.get_attrs().values() + if isinstance(param, Categorical) + ] + if all( + any( + all(isinstance(choice, datatype) for choice in list(cat_param.choices)) # type: ignore + for datatype in [int, float, str] + ) + for cat_param in categoricals + ): + # If both checks pass, convert the space to a classic SearchSpace + classic_space: dict[str, Any] = {} + for key, value in space.get_attrs().items(): + if isinstance(value, Categorical): + classic_space[key] = neps.HPOCategorical( + choices=list(set(value.choices)), # type: ignore + prior=value.choices[value.prior] if value.has_prior else None, # type: ignore + prior_confidence=( + value.prior_confidence.value if value.has_prior else "low" + ), + ) + elif isinstance(value, Integer): + classic_space[key] = neps.HPOInteger( + lower=value.min_value, + upper=value.max_value, + prior=value.prior if value.has_prior else None, + prior_confidence=( + value.prior_confidence.value if value.has_prior else "low" + ), + ) + elif isinstance(value, Float): + classic_space[key] = neps.HPOFloat( + lower=value.min_value, + upper=value.max_value, + prior=value.prior if value.has_prior else None, + prior_confidence=( + value.prior_confidence.value if value.has_prior else "low" + ), + ) + elif isinstance(value, Fidelity): + if isinstance(value._domain, Integer): + classic_space[key] = neps.HPOInteger( + lower=value._domain.min_value, + upper=value._domain.max_value, + is_fidelity=True, + ) + elif isinstance(value._domain, Float): + classic_space[key] = neps.HPOFloat( + lower=value._domain.min_value, + upper=value._domain.max_value, + is_fidelity=True, + ) + else: + classic_space[key] = neps.HPOConstant(value) + return 
convert_mapping(classic_space) + return None diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 9abc5edc5..1bc96b570 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -413,6 +413,13 @@ def __init__( self._choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T] if isinstance(choices, Sequence): self._choices = tuple(choice for choice in choices) + if any(isinstance(choice, tuple) for choice in self._choices) and any( + not isinstance(choice, tuple) for choice in self._choices + ): + self._choices = tuple( + (choice,) if not isinstance(choice, tuple) else choice + for choice in self._choices + ) else: self._choices = choices self._prior = prior @@ -853,7 +860,11 @@ def sample(self) -> int: """ if self._log: - raise NotImplementedError("TODO.") + return int( + math.exp( + random.uniform(math.log(self._min_value), math.log(self._max_value)) + ) + ) return int(random.randint(self._min_value, self._max_value)) def centered_around( diff --git a/neps/space/parameters.py b/neps/space/parameters.py index 9595a47ae..b7051a9a2 100644 --- a/neps/space/parameters.py +++ b/neps/space/parameters.py @@ -12,7 +12,7 @@ @dataclass -class Float: +class HPOFloat: """A float value for a parameter. This kind of parameter is used to represent hyperparameters with continuous float @@ -56,19 +56,19 @@ class Float: def __post_init__(self) -> None: if self.lower >= self.upper: raise ValueError( - f"Float parameter: bounds error (lower >= upper). Actual values: " + "Float parameter: bounds error (lower >= upper). Actual values: " f"lower={self.lower}, upper={self.upper}" ) if self.log and (self.lower <= 0 or self.upper <= 0): raise ValueError( - f"Float parameter: bounds error (log scale cant have bounds <= 0). " + "Float parameter: bounds error (log scale cant have bounds <= 0). 
" f"Actual values: lower={self.lower}, upper={self.upper}" ) if self.prior is not None and not self.lower <= self.prior <= self.upper: raise ValueError( - f"Float parameter: prior bounds error. Expected lower <= prior <= upper, " + "Float parameter: prior bounds error. Expected lower <= prior <= upper, " f"but got lower={self.lower}, prior={self.prior}, upper={self.upper}" ) @@ -77,14 +77,14 @@ def __post_init__(self) -> None: if self.is_fidelity and (self.lower < 0 or self.upper < 0): raise ValueError( - f"Float parameter: fidelity bounds error. Expected fidelity" + "Float parameter: fidelity bounds error. Expected fidelity" f" bounds to be >= 0, but got lower={self.lower}, " f" upper={self.upper}." ) if self.is_fidelity and self.prior is not None: raise ValueError( - f"Float parameter: Fidelity parameters " + "Float parameter: Fidelity parameters " f"cannot have a prior value. Got prior={self.prior}." ) @@ -99,7 +99,7 @@ def __post_init__(self) -> None: @dataclass -class Integer: +class HPOInteger: """An integer value for a parameter. This kind of parameter is used to represent hyperparameters with @@ -143,7 +143,7 @@ class Integer: def __post_init__(self) -> None: if self.lower >= self.upper: raise ValueError( - f"Integer parameter: bounds error (lower >= upper). Actual values: " + "Integer parameter: bounds error (lower >= upper). Actual values: " f"lower={self.lower}, upper={self.upper}" ) @@ -155,7 +155,7 @@ def __post_init__(self) -> None: upper_int = int(self.upper) if lower_int != self.lower or upper_int != self.upper: raise ValueError( - f"Integer parameter: bounds error (lower and upper must be integers). " + "Integer parameter: bounds error (lower and upper must be integers). " f"Actual values: lower={self.lower}, upper={self.upper}" ) @@ -164,26 +164,26 @@ def __post_init__(self) -> None: if self.is_fidelity and (self.lower < 0 or self.upper < 0): raise ValueError( - f"Integer parameter: fidelity bounds error. 
Expected fidelity" + "Integer parameter: fidelity bounds error. Expected fidelity" f" bounds to be >= 0, but got lower={self.lower}, " f" upper={self.upper}." ) if self.log and (self.lower <= 0 or self.upper <= 0): raise ValueError( - f"Integer parameter: bounds error (log scale cant have bounds <= 0). " + "Integer parameter: bounds error (log scale cant have bounds <= 0). " f"Actual values: lower={self.lower}, upper={self.upper}" ) if self.prior is not None and not self.lower <= self.prior <= self.upper: raise ValueError( - f"Integer parameter: Expected lower <= prior <= upper," + "Integer parameter: Expected lower <= prior <= upper," f"but got lower={self.lower}, prior={self.prior}, upper={self.upper}" ) if self.is_fidelity and self.prior is not None: raise ValueError( - f"Integer parameter: Fidelity parameters " + "Integer parameter: Fidelity parameters " f"cannot have a prior value. Got prior={self.prior}." ) @@ -192,7 +192,7 @@ def __post_init__(self) -> None: @dataclass -class Categorical: +class HPOCategorical: """A list of **unordered** choices for a parameter. This kind of parameter is used to represent hyperparameters that can take on a @@ -254,7 +254,7 @@ def __post_init__(self) -> None: @dataclass -class Constant: +class HPOConstant: """A constant value for a parameter. This kind of parameter is used to represent hyperparameters with values that @@ -284,7 +284,7 @@ def center(self) -> Any: return self.value -Parameter: TypeAlias = Float | Integer | Categorical +Parameter: TypeAlias = HPOFloat | HPOInteger | HPOCategorical """A type alias for all the parameter types. 
* [`Float`][neps.space.Float] diff --git a/neps/space/parsing.py b/neps/space/parsing.py index 2168d4f2b..5014f5880 100644 --- a/neps/space/parsing.py +++ b/neps/space/parsing.py @@ -10,7 +10,13 @@ from typing import TYPE_CHECKING, Any, TypeAlias from neps.space.neps_spaces.parameters import Pipeline -from neps.space.parameters import Categorical, Constant, Float, Integer, Parameter +from neps.space.parameters import ( + HPOCategorical, + HPOConstant, + HPOFloat, + HPOInteger, + Parameter, +) from neps.space.search_space import SearchSpace if TYPE_CHECKING: @@ -58,7 +64,7 @@ def scientific_parse(value: str | int | float) -> str | int | float: def as_parameter( # noqa: C901, PLR0911, PLR0912 details: SerializedParameter, -) -> Parameter | Constant: +) -> Parameter | HPOConstant: """Deduces the parameter type from details. Args: @@ -76,7 +82,7 @@ def as_parameter( # noqa: C901, PLR0911, PLR0912 # Constant case str() | int() | float(): val = scientific_parse(details) - return Constant(val) + return HPOConstant(val) # Bounds of float or int case tuple((x, y)): @@ -84,9 +90,9 @@ def as_parameter( # noqa: C901, PLR0911, PLR0912 _y = scientific_parse(y) match (_x, _y): case (int(), int()): - return Integer(_x, _y) + return HPOInteger(_x, _y) case (float(), float()): - return Float(_x, _y) + return HPOFloat(_x, _y) case _: raise ValueError( f"Expected both 'int' or 'float' for bounds but got {type(_x)=}" @@ -103,9 +109,9 @@ def as_parameter( # noqa: C901, PLR0911, PLR0912 _y = scientific_parse(y) match (_x, _y): case (int(), int()) if _x <= _y: # 2./3. - return Integer(_x, _y) + return HPOInteger(_x, _y) case (float(), float()) if _x <= _y: # 2./3. - return Float(_x, _y) + return HPOFloat(_x, _y) # Error case: # We do have two numbers, but of different types. This could @@ -123,7 +129,7 @@ def as_parameter( # noqa: C901, PLR0911, PLR0912 ) # At least one of them is a string, so we treat is as categorical. 
case _: - return Categorical(choices=[_x, _y]) + return HPOCategorical(choices=[_x, _y]) ## Categorical list of choices (tuple is reserved for bounds) case Sequence() if not isinstance(details, tuple): @@ -132,7 +138,7 @@ def as_parameter( # noqa: C901, PLR0911, PLR0912 # when specifying a grid. Hence, we map over the list and convert # what we can details = [scientific_parse(d) for d in details] - return Categorical(details) + return HPOCategorical(details) # Categorical dict declartion case {"choices": choices, **rest}: @@ -142,7 +148,7 @@ def as_parameter( # noqa: C901, PLR0911, PLR0912 # See note above about scientific notation elements choices = [scientific_parse(c) for c in choices] - return Categorical(choices, **rest) # type: ignore + return HPOCategorical(choices, **rest) # type: ignore # Constant dict declartion case {"value": v, **_rest}: @@ -153,7 +159,7 @@ def as_parameter( # noqa: C901, PLR0911, PLR0912 f" which indicates to treat value `{v}` a constant." ) - return Constant(v, **_rest) # type: ignore + return HPOConstant(v, **_rest) # type: ignore # Bounds dict declartion case {"lower": l, "upper": u, **rest}: @@ -163,15 +169,15 @@ def as_parameter( # noqa: C901, PLR0911, PLR0912 _type = rest.pop("type", None) match _type: case "int" | "integer": - return Integer(_x, _y, **rest) # type: ignore + return HPOInteger(_x, _y, **rest) # type: ignore case "float" | "floating": - return Float(_x, _y, **rest) # type: ignore + return HPOFloat(_x, _y, **rest) # type: ignore case None: match (_x, _y): case (int(), int()): - return Integer(_x, _y, **rest) # type: ignore + return HPOInteger(_x, _y, **rest) # type: ignore case (float(), float()): - return Float(_x, _y, **rest) # type: ignore + return HPOFloat(_x, _y, **rest) # type: ignore case _: raise ValueError( "Expected both 'int' or 'float' for bounds but" @@ -191,10 +197,10 @@ def as_parameter( # noqa: C901, PLR0911, PLR0912 def convert_mapping(pipeline_space: Mapping[str, Any]) -> SearchSpace: """Converts a 
dictionary to a SearchSpace object.""" - parameters: dict[str, Parameter | Constant] = {} + parameters: dict[str, Parameter | HPOConstant] = {} for name, details in pipeline_space.items(): match details: - case Float() | Integer() | Categorical() | Constant(): + case HPOFloat() | HPOInteger() | HPOCategorical() | HPOConstant(): parameters[name] = dataclasses.replace(details) # copy case str() | int() | float() | Mapping(): try: @@ -202,7 +208,7 @@ def convert_mapping(pipeline_space: Mapping[str, Any]) -> SearchSpace: except (TypeError, ValueError) as e: raise ValueError(f"Error parsing parameter '{name}'") from e case None: - parameters[name] = Constant(None) + parameters[name] = HPOConstant(None) case _: raise ValueError( f"Unrecognized parameter type '{type(details)}' for '{name}'." @@ -223,7 +229,7 @@ def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: """ import ConfigSpace as CS - space: dict[str, Parameter | Constant] = {} + space: dict[str, Parameter | HPOConstant] = {} if any(configspace.conditions) or any(configspace.forbidden_clauses): raise NotImplementedError( "The ConfigurationSpace has conditions or forbidden clauses, " @@ -233,9 +239,9 @@ def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: for name, hyperparameter in configspace.items(): match hyperparameter: case CS.Constant(): - space[name] = Constant(value=hyperparameter.value) + space[name] = HPOConstant(value=hyperparameter.value) case CS.CategoricalHyperparameter(): - space[name] = Categorical(hyperparameter.choices) # type: ignore + space[name] = HPOCategorical(hyperparameter.choices) # type: ignore case CS.OrdinalHyperparameter(): raise ValueError( "NePS does not support ordinals yet, please" @@ -243,14 +249,14 @@ def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: " categorical hyperparameter." 
) case CS.UniformIntegerHyperparameter(): - space[name] = Integer( + space[name] = HPOInteger( lower=hyperparameter.lower, upper=hyperparameter.upper, log=hyperparameter.log, prior=None, ) case CS.UniformFloatHyperparameter(): - space[name] = Float( + space[name] = HPOFloat( lower=hyperparameter.lower, upper=hyperparameter.upper, log=hyperparameter.log, @@ -266,7 +272,7 @@ def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: UserWarning, stacklevel=2, ) - space[name] = Float( + space[name] = HPOFloat( lower=hyperparameter.lower, upper=hyperparameter.upper, log=hyperparameter.log, @@ -281,7 +287,7 @@ def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: UserWarning, stacklevel=2, ) - space[name] = Integer( + space[name] = HPOInteger( lower=hyperparameter.lower, upper=hyperparameter.upper, log=hyperparameter.log, diff --git a/neps/space/search_space.py b/neps/space/search_space.py index 2b0659f6a..3d727a535 100644 --- a/neps/space/search_space.py +++ b/neps/space/search_space.py @@ -9,23 +9,29 @@ from dataclasses import dataclass, field from typing import Any -from neps.space.parameters import Categorical, Constant, Float, Integer, Parameter +from neps.space.parameters import ( + HPOCategorical, + HPOConstant, + HPOFloat, + HPOInteger, + Parameter, +) # NOTE: The use of `Mapping` instead of `dict` is so that type-checkers # can check if we accidetally mutate these as we pass the parameters around. # We really should not, and instead make a copy if we really need to. 
@dataclass -class SearchSpace(Mapping[str, Parameter | Constant]): +class SearchSpace(Mapping[str, Parameter | HPOConstant]): """A container for parameters.""" - elements: Mapping[str, Parameter | Constant] = field(default_factory=dict) + elements: Mapping[str, Parameter | HPOConstant] = field(default_factory=dict) """All items in the search space.""" - categoricals: Mapping[str, Categorical] = field(init=False) + categoricals: Mapping[str, HPOCategorical] = field(init=False) """The categorical hyperparameters in the search space.""" - numerical: Mapping[str, Integer | Float] = field(init=False) + numerical: Mapping[str, HPOInteger | HPOFloat] = field(init=False) """The numerical hyperparameters in the search space. !!! note @@ -33,7 +39,7 @@ class SearchSpace(Mapping[str, Parameter | Constant]): This does not include fidelities. """ - fidelities: Mapping[str, Integer | Float] = field(init=False) + fidelities: Mapping[str, HPOInteger | HPOFloat] = field(init=False) """The fidelities in the search space. Currently no optimizer supports multiple fidelities but it is defined here incase. @@ -53,7 +59,7 @@ def searchables(self) -> Mapping[str, Parameter]: return {**self.numerical, **self.categoricals} @property - def fidelity(self) -> tuple[str, Float | Integer] | None: + def fidelity(self) -> tuple[str, HPOFloat | HPOInteger] | None: """The fidelity parameter for the search space.""" return None if len(self.fidelities) == 0 else next(iter(self.fidelities.items())) @@ -61,15 +67,15 @@ def __post_init__(self) -> None: # Ensure that we have a consistent order for all our items. 
self.elements = dict(sorted(self.elements.items(), key=lambda x: x[0])) - fidelities: dict[str, Float | Integer] = {} - numerical: dict[str, Float | Integer] = {} - categoricals: dict[str, Categorical] = {} + fidelities: dict[str, HPOFloat | HPOInteger] = {} + numerical: dict[str, HPOFloat | HPOInteger] = {} + categoricals: dict[str, HPOCategorical] = {} constants: dict[str, Any] = {} # Process the hyperparameters for name, hp in self.elements.items(): match hp: - case Float() | Integer() if hp.is_fidelity: + case HPOFloat() | HPOInteger() if hp.is_fidelity: # We should allow this at some point, but until we do, # raise an error if len(fidelities) >= 1: @@ -80,11 +86,11 @@ def __post_init__(self) -> None: ) fidelities[name] = hp - case Float() | Integer(): + case HPOFloat() | HPOInteger(): numerical[name] = hp - case Categorical(): + case HPOCategorical(): categoricals[name] = hp - case Constant(): + case HPOConstant(): constants[name] = hp.value case _: @@ -95,7 +101,7 @@ def __post_init__(self) -> None: self.constants = constants self.fidelities = fidelities - def __getitem__(self, key: str) -> Parameter | Constant: + def __getitem__(self, key: str) -> Parameter | HPOConstant: return self.elements[key] def __iter__(self) -> Iterator[str]: diff --git a/neps/status/status.py b/neps/status/status.py index 4d8d4421e..bafb39c45 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -7,14 +7,19 @@ from collections.abc import Sequence from dataclasses import asdict, dataclass, field from pathlib import Path +from typing import TYPE_CHECKING import numpy as np import pandas as pd from neps.runtime import get_workers_neps_state +from neps.space.neps_spaces import neps_space from neps.state.neps_state import FileLocker, NePSState from neps.state.trial import State, Trial +if TYPE_CHECKING: + from neps.space.neps_spaces.parameters import Pipeline + @dataclass class Summary: @@ -99,7 +104,9 @@ def num_pending(self) -> int: """Number of trials that are pending.""" 
return len(self.by_state[State.PENDING]) - def formatted(self) -> str: + def formatted( + self, pipeline_space_variables: tuple[Pipeline, list[str]] | None = None + ) -> str: """Return a formatted string of the summary.""" state_summary = "\n".join( f" {state.name.lower()}: {len(trials)}" @@ -114,13 +121,52 @@ def formatted(self) -> str: best_summary = "No best found yet." else: best_trial, best_objective_to_minimize = self.best + + # Format config based on whether pipeline_space_variables is provided + best_summary = ( f"# Best Found (config {best_trial.metadata.id}):" "\n" f"\n objective_to_minimize: {best_objective_to_minimize}" - f"\n config: {best_trial.config}" - f"\n path: {best_trial.metadata.location}" ) + if pipeline_space_variables is None: + best_summary += f"\n config: {best_trial.config}" + else: + pipeline_configs = [ + neps_space.config_string.ConfigString( + neps_space.convert_operation_to_string( + getattr( + neps_space.resolve(pipeline_space_variables[0])[0], + variable, + ) + ) + ).pretty_format() + for variable in pipeline_space_variables[1] + ] + for pipeline_config in pipeline_configs: + # Replace literal \t and \n with actual formatting + formatted_config = pipeline_config.replace("\\t", " ").replace( + "\\n", "\n" + ) + + # Add proper indentation to each line + lines = formatted_config.split("\n") + indented_lines = [] + for i, line in enumerate(lines): + if i == 0: + indented_lines.append( + line + ) # First line gets base indentation + else: + indented_lines.append( + " " + line + ) # Subsequent lines get extra indentation + + formatted_config = "\n".join(indented_lines) + best_summary += f"\n config:\n {formatted_config}" + + best_summary += f"\n path: {best_trial.metadata.location}" + assert best_trial.report is not None if best_trial.report.cost is not None: best_summary += f"\n cost: {best_trial.report.cost}" @@ -172,12 +218,17 @@ def status( root_directory: str | Path, *, print_summary: bool = False, + pipeline_space_variables: 
tuple[Pipeline, list[str]] | None = None, ) -> tuple[pd.DataFrame, pd.Series]: """Print status information of a neps run and return results. Args: root_directory: The root directory given to neps.run. - print_summary: If true, print a summary of the current run state + print_summary: If true, print a summary of the current run state. + pipeline_space_variables: If provided, this tuple contains the Pipeline and a + list of variable names to format the config in the summary. This is useful + for pipelines that have a complex configuration structure, allowing for a + more readable output. Returns: Dataframe of full results and short summary series. @@ -186,7 +237,7 @@ def status( summary = Summary.from_directory(root_directory) if print_summary: - print(summary.formatted()) + print(summary.formatted(pipeline_space_variables=pipeline_space_variables)) df = summary.df() diff --git a/neps_examples/basic_usage/hyperparameters.py b/neps_examples/basic_usage/hyperparameters.py index 6b736fcd3..88ff20918 100644 --- a/neps_examples/basic_usage/hyperparameters.py +++ b/neps_examples/basic_usage/hyperparameters.py @@ -7,6 +7,7 @@ # five hyperparameters and returns their sum. # Neps uses the default optimizer to minimize this objective function. 
+ def evaluate_pipeline(float1, float2, categorical, integer1, integer2): objective_to_minimize = -float( np.sum([float1, float2, int(categorical), integer1, integer2]) @@ -14,18 +15,17 @@ def evaluate_pipeline(float1, float2, categorical, integer1, integer2): return objective_to_minimize -pipeline_space = dict( - float1=neps.Float(lower=0, upper=1), - float2=neps.Float(lower=-10, upper=10), - categorical=neps.Categorical(choices=[0, 1]), - integer1=neps.Integer(lower=0, upper=1), - integer2=neps.Integer(lower=1, upper=1000, log=True), -) +class PipelineSpace(neps.Pipeline): + float1=neps.Float(min_value=0, max_value=1) + float2=neps.Float(min_value=-10, max_value=10) + categorical=neps.Categorical(choices=(0, 1)) + integer1=neps.Integer(min_value=0, max_value=1) + integer2=neps.Integer(min_value=1, max_value=1000, log=True) logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), root_directory="results/hyperparameters_example", post_run_summary=True, max_evaluations_total=30, diff --git a/neps_examples/convenience/logging_additional_info.py b/neps_examples/convenience/logging_additional_info.py index 6756e03c7..5487fa9d8 100644 --- a/neps_examples/convenience/logging_additional_info.py +++ b/neps_examples/convenience/logging_additional_info.py @@ -21,18 +21,18 @@ def evaluate_pipeline(float1, float2, categorical, integer1, integer2): } -pipeline_space = dict( - float1=neps.Float(lower=0, upper=1), - float2=neps.Float(lower=-10, upper=10), - categorical=neps.Categorical(choices=[0, 1]), - integer1=neps.Integer(lower=0, upper=1), - integer2=neps.Integer(lower=1, upper=1000, log=True), -) +class PipelineSpace(neps.Pipeline): + float1 = neps.Float(min_value=0, max_value=1) + float2 = neps.Float(min_value=-10, max_value=10) + categorical = neps.Categorical(choices=(0, 1)) + integer1 = neps.Integer(min_value=0, max_value=1) + integer2 = neps.Integer(min_value=1, 
max_value=1000, log=True) + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), root_directory="results/logging_additional_info", max_evaluations_total=5, ) diff --git a/neps_examples/convenience/neps_tblogger_tutorial.py b/neps_examples/convenience/neps_tblogger_tutorial.py index fd9bc8144..e0a5bd3d7 100644 --- a/neps_examples/convenience/neps_tblogger_tutorial.py +++ b/neps_examples/convenience/neps_tblogger_tutorial.py @@ -202,9 +202,7 @@ def training( optimizer.step() # Calculate validation objective_to_minimize using the objective_to_minimize_ev function. - validation_objective_to_minimize = objective_to_minimize_ev( - model, validation_loader - ) + validation_objective_to_minimize = objective_to_minimize_ev(model, validation_loader) return validation_objective_to_minimize @@ -212,14 +210,13 @@ def training( # Design the pipeline search spaces. -def pipeline_space() -> dict: - pipeline = dict( - lr=neps.Float(lower=1e-5, upper=1e-1, log=True), - optim=neps.Categorical(choices=["Adam", "SGD"]), - weight_decay=neps.Float(lower=1e-4, upper=1e-1, log=True), - ) +def pipeline_space() -> neps.Pipeline: + class PipelineSpace(neps.Pipeline): + lr = neps.Float(min_value=1e-5, max_value=1e-1, log=True) + optim = neps.Categorical(choices=("Adam", "SGD")) + weight_decay = neps.Float(min_value=1e-4, max_value=1e-1, log=True) - return pipeline + return PipelineSpace() ############################################################# @@ -229,13 +226,9 @@ def evaluate_pipeline(lr, optim, weight_decay): model = MLP() if optim == "Adam": - optimizer = torch.optim.Adam( - model.parameters(), lr=lr, weight_decay=weight_decay - ) + optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay) elif optim == "SGD": - optimizer = torch.optim.SGD( - model.parameters(), lr=lr, weight_decay=weight_decay - ) + optimizer = torch.optim.SGD(model.parameters(), lr=lr, 
weight_decay=weight_decay) else: raise ValueError( "Optimizer choices are defined differently in the pipeline_space" diff --git a/neps_examples/convenience/running_on_slurm_scripts.py b/neps_examples/convenience/running_on_slurm_scripts.py index 86fe41ac2..04cf845e2 100644 --- a/neps_examples/convenience/running_on_slurm_scripts.py +++ b/neps_examples/convenience/running_on_slurm_scripts.py @@ -50,15 +50,15 @@ def evaluate_pipeline_via_slurm( return validation_error -pipeline_space = dict( - optimizer=neps.Categorical(choices=["sgd", "adam"]), - learning_rate=neps.Float(lower=10e-7, upper=10e-3, log=True), -) +class PipelineSpace(neps.Pipeline): + optimizer = neps.Categorical(choices=("sgd", "adam")) + learning_rate = neps.Float(min_value=10e-7, max_value=10e-3, log=True) + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline_via_slurm, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), root_directory="results/slurm_script_example", max_evaluations_total=5, ) diff --git a/neps_examples/convenience/working_directory_per_pipeline.py b/neps_examples/convenience/working_directory_per_pipeline.py index de2ec9fd9..96864d6d3 100644 --- a/neps_examples/convenience/working_directory_per_pipeline.py +++ b/neps_examples/convenience/working_directory_per_pipeline.py @@ -18,16 +18,16 @@ def evaluate_pipeline(pipeline_directory: Path, float1, categorical, integer1): return objective_to_minimize -pipeline_space = dict( - float1=neps.Float(lower=0, upper=1), - categorical=neps.Categorical(choices=[0, 1]), - integer1=neps.Integer(lower=0, upper=1), -) +class PipelineSpace(neps.Pipeline): + float1 = neps.Float(min_value=0, max_value=1) + categorical = neps.Categorical(choices=(0, 1)) + integer1 = neps.Integer(min_value=0, max_value=1) + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), 
root_directory="results/working_directory_per_pipeline", max_evaluations_total=5, ) diff --git a/neps_examples/efficiency/expert_priors_for_hyperparameters.py b/neps_examples/efficiency/expert_priors_for_hyperparameters.py index 5980668a5..565f86e61 100644 --- a/neps_examples/efficiency/expert_priors_for_hyperparameters.py +++ b/neps_examples/efficiency/expert_priors_for_hyperparameters.py @@ -22,31 +22,37 @@ def evaluate_pipeline(some_float, some_integer, some_cat): # neps uses the default values and a confidence in this default value to construct a prior # that speeds up the search -pipeline_space = dict( - some_float=neps.Float( - lower=1, - upper=1000, - log=True, - prior=900, - prior_confidence="medium", - ), - some_integer=neps.Integer( - lower=0, - upper=50, - prior=35, - prior_confidence="low", - ), - some_cat=neps.Categorical( - choices=["a", "b", "c"], - prior="a", - prior_confidence="high", - ), -) +class PipelineSpace(neps.Pipeline): + some_float = ( + neps.Float( + min_value=1, + max_value=1000, + log=True, + prior=900, + prior_confidence="medium", + ), + ) + some_integer = ( + neps.Integer( + min_value=0, + max_value=50, + prior=35, + prior_confidence="low", + ), + ) + some_cat = ( + neps.Categorical( + choices=("a", "b", "c"), + prior=0, + prior_confidence="high", + ), + ) + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), root_directory="results/user_priors_example", max_evaluations_total=15, ) diff --git a/neps_examples/efficiency/multi_fidelity.py b/neps_examples/efficiency/multi_fidelity.py index c85d411e1..6b64bb4a5 100644 --- a/neps_examples/efficiency/multi_fidelity.py +++ b/neps_examples/efficiency/multi_fidelity.py @@ -46,8 +46,9 @@ def get_model_and_optimizer(learning_rate): def evaluate_pipeline( pipeline_directory: Path, # The path associated with this configuration - previous_pipeline_directory: Path - | None, # The path associated 
with any previous config + previous_pipeline_directory: ( + Path | None + ), # The path associated with any previous config learning_rate: float, epoch: int, ) -> dict: @@ -82,15 +83,15 @@ def evaluate_pipeline( ) -pipeline_space = dict( - learning_rate=neps.Float(lower=1e-4, upper=1e0, log=True), - epoch=neps.Integer(lower=1, upper=10, is_fidelity=True), -) +class PipelineSpace(neps.Pipeline): + learning_rate = neps.Float(min_value=1e-4, max_value=1e0, log=True) + epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=10)) + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), root_directory="results/multi_fidelity_example", # Optional: Do not start another evaluation after <=50 epochs, corresponds to cost # field above. diff --git a/neps_examples/efficiency/multi_fidelity_and_expert_priors.py b/neps_examples/efficiency/multi_fidelity_and_expert_priors.py index 96f7b2b3e..c7dd5d6d5 100644 --- a/neps_examples/efficiency/multi_fidelity_and_expert_priors.py +++ b/neps_examples/efficiency/multi_fidelity_and_expert_priors.py @@ -6,42 +6,39 @@ # This example demonstrates NePS uses both fidelity and expert priors to # optimize hyperparameters of a pipeline. 
+ def evaluate_pipeline(float1, float2, integer1, fidelity): objective_to_minimize = -float(np.sum([float1, float2, integer1])) / fidelity return objective_to_minimize -pipeline_space = dict( - float1=neps.Float( - lower=1, - upper=1000, +class PipelineSpace(neps.Pipeline): + float1 = neps.Float( + min_value=1, + max_value=1000, log=False, prior=600, prior_confidence="medium", - ), - float2=neps.Float( - lower=-10, - upper=10, + ) + float2 = neps.Float( + min_value=-10, + max_value=10, prior=0, prior_confidence="medium", - ), - integer1=neps.Integer( - lower=0, - upper=50, + ) + integer1 = neps.Integer( + min_value=0, + max_value=50, prior=35, prior_confidence="low", - ), - fidelity=neps.Integer( - lower=1, - upper=10, - is_fidelity=True, - ), -) + ) + fidelity = neps.Fidelity(neps.Integer(min_value=1, max_value=10)) + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), root_directory="results/multifidelity_priors", max_evaluations_total=25, # For an alternate stopping method see multi_fidelity.py ) diff --git a/neps_examples/efficiency/pytorch_lightning_ddp.py b/neps_examples/efficiency/pytorch_lightning_ddp.py index 96b620de1..031c7d33d 100644 --- a/neps_examples/efficiency/pytorch_lightning_ddp.py +++ b/neps_examples/efficiency/pytorch_lightning_ddp.py @@ -11,7 +11,8 @@ class ToyModel(nn.Module): - """ Taken from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html """ + """Taken from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html""" + def __init__(self): super(ToyModel, self).__init__() self.net1 = nn.Linear(10, 10) @@ -21,6 +22,7 @@ def __init__(self): def forward(self, x): return self.net2(self.relu(self.net1(x))) + class LightningModel(L.LightningModule): def __init__(self, lr): super().__init__() @@ -51,6 +53,7 @@ def test_step(self, batch, batch_idx): def configure_optimizers(self): return torch.optim.SGD(self.parameters(), 
lr=self.lr) + def evaluate_pipeline(lr=0.1, epoch=20): L.seed_everything(42) # Model @@ -70,35 +73,27 @@ def evaluate_pipeline(lr=0.1, epoch=20): test_dataloader = DataLoader(test_dataset, batch_size=20, shuffle=False) # Trainer with DDP Strategy - trainer = L.Trainer(gradient_clip_val=0.25, - max_epochs=epoch, - fast_dev_run=False, - strategy='ddp', - devices=NUM_GPU - ) + trainer = L.Trainer( + gradient_clip_val=0.25, + max_epochs=epoch, + fast_dev_run=False, + strategy="ddp", + devices=NUM_GPU, + ) trainer.fit(model, train_dataloader, val_dataloader) trainer.validate(model, test_dataloader) return trainer.logged_metrics["val_loss"].item() -pipeline_space = dict( - lr=neps.Float( - lower=0.001, - upper=0.1, - log=True, - prior=0.01 - ), - epoch=neps.Integer( - lower=1, - upper=3, - is_fidelity=True - ) - ) +class PipelineSpace(neps.Pipeline): + lr = neps.Float(min_value=0.001, max_value=0.1, log=True, prior=0.01) + epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=3)) + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), root_directory="results/pytorch_lightning_ddp", - max_evaluations_total=5 - ) + max_evaluations_total=5, +) diff --git a/neps_examples/efficiency/pytorch_lightning_fsdp.py b/neps_examples/efficiency/pytorch_lightning_fsdp.py index 6af3d6746..bc3c22df6 100644 --- a/neps_examples/efficiency/pytorch_lightning_fsdp.py +++ b/neps_examples/efficiency/pytorch_lightning_fsdp.py @@ -56,23 +56,13 @@ def evaluate_pipeline(lr=0.1, epoch=20): logging.basicConfig(level=logging.INFO) - pipeline_space = dict( - lr=neps.Float( - lower=0.0001, - upper=0.1, - log=True, - prior=0.01 - ), - epoch=neps.Integer( - lower=1, - upper=3, - is_fidelity=True - ) - ) + class PipelineSpace(neps.Pipeline): + lr = neps.Float(min_value=0.001, max_value=0.1, log=True, prior=0.01) + epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=3)) neps.run( 
evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), root_directory="results/pytorch_lightning_fsdp", - max_evaluations_total=5 - ) + max_evaluations_total=5, + ) diff --git a/neps_examples/efficiency/pytorch_native_ddp.py b/neps_examples/efficiency/pytorch_native_ddp.py index 9ced5dc86..fab4592e0 100644 --- a/neps_examples/efficiency/pytorch_native_ddp.py +++ b/neps_examples/efficiency/pytorch_native_ddp.py @@ -1,4 +1,4 @@ -""" Some parts of this code are taken from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html +"""Some parts of this code are taken from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html Mind that this example does not run on Windows at the moment.""" @@ -32,8 +32,8 @@ def setup(rank, world_size): - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '12355' + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = "12355" # initialize the process group dist.init_process_group("gloo", rank=rank, world_size=world_size) @@ -44,7 +44,8 @@ def cleanup(): class ToyModel(nn.Module): - """ Taken from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html """ + """Taken from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html""" + def __init__(self): super(ToyModel, self).__init__() self.net1 = nn.Linear(10, 10) @@ -56,7 +57,7 @@ def forward(self, x): def demo_basic(rank, world_size, loss_dict, learning_rate, epochs): - """ Taken from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html (modified)""" + """Taken from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html (modified)""" print(f"Running basic DDP example on rank {rank}.") setup(rank, world_size) @@ -88,28 +89,33 @@ def demo_basic(rank, world_size, loss_dict, learning_rate, epochs): def evaluate_pipeline(learning_rate, epochs): from torch.multiprocessing import Manager + world_size = NUM_GPU # Number of GPUs manager = Manager() loss_dict = manager.dict() - 
mp.spawn(demo_basic, - args=(world_size, loss_dict, learning_rate, epochs), - nprocs=world_size, - join=True) + mp.spawn( + demo_basic, + args=(world_size, loss_dict, learning_rate, epochs), + nprocs=world_size, + join=True, + ) loss = sum(loss_dict.values()) // world_size - return {'loss': loss} + return {"loss": loss} + +class PipelineSpace(neps.Pipeline): + learning_rate = neps.Float(min_value=10e-7, max_value=10e-3, log=True) + epochs = neps.Integer(min_value=1, max_value=3) -pipeline_space = dict( - learning_rate=neps.Float(lower=10e-7, upper=10e-3, log=True), - epochs=neps.Integer(lower=1, upper=3) -) -if __name__ == '__main__': +if __name__ == "__main__": logging.basicConfig(level=logging.INFO) - neps.run(evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, - root_directory="results/pytorch_ddp", - max_evaluations_total=25) + neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=PipelineSpace(), + root_directory="results/pytorch_ddp", + max_evaluations_total=25, + ) diff --git a/neps_examples/efficiency/pytorch_native_fsdp.py b/neps_examples/efficiency/pytorch_native_fsdp.py index 1fec7bef3..531147908 100644 --- a/neps_examples/efficiency/pytorch_native_fsdp.py +++ b/neps_examples/efficiency/pytorch_native_fsdp.py @@ -24,18 +24,21 @@ size_based_auto_wrap_policy, ) -NUM_GPU = 8 # Number of GPUs to use for FSDP +NUM_GPU = 8 # Number of GPUs to use for FSDP + def setup(rank, world_size): - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '12355' + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = "12355" # initialize the process group dist.init_process_group("nccl", rank=rank, world_size=world_size) + def cleanup(): dist.destroy_process_group() + class Net(nn.Module): def __init__(self): super(Net, self).__init__() @@ -62,6 +65,7 @@ def forward(self, x): output = F.log_softmax(x, dim=1) return output + def train(model, rank, world_size, train_loader, optimizer, epoch, sampler=None): 
model.train() ddp_loss = torch.zeros(2).to(rank) @@ -71,7 +75,7 @@ def train(model, rank, world_size, train_loader, optimizer, epoch, sampler=None) data, target = data.to(rank), target.to(rank) optimizer.zero_grad() output = model(data) - loss = F.nll_loss(output, target, reduction='sum') + loss = F.nll_loss(output, target, reduction="sum") loss.backward() optimizer.step() ddp_loss[0] += loss.item() @@ -79,7 +83,8 @@ def train(model, rank, world_size, train_loader, optimizer, epoch, sampler=None) dist.all_reduce(ddp_loss, op=dist.ReduceOp.SUM) if rank == 0: - print('Train Epoch: {} \tLoss: {:.6f}'.format(epoch, ddp_loss[0] / ddp_loss[1])) + print("Train Epoch: {} \tLoss: {:.6f}".format(epoch, ddp_loss[0] / ddp_loss[1])) + def test(model, rank, world_size, test_loader): model.eval() @@ -89,8 +94,12 @@ def test(model, rank, world_size, test_loader): for data, target in test_loader: data, target = data.to(rank), target.to(rank) output = model(data) - ddp_loss[0] += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss - pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability + ddp_loss[0] += F.nll_loss( + output, target, reduction="sum" + ).item() # sum up batch loss + pred = output.argmax( + dim=1, keepdim=True + ) # get the index of the max log-probability ddp_loss[1] += pred.eq(target.view_as(pred)).sum().item() ddp_loss[2] += len(data) @@ -99,43 +108,45 @@ def test(model, rank, world_size, test_loader): test_loss = math.inf if rank == 0: test_loss = ddp_loss[0] / ddp_loss[2] - print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format( - test_loss, int(ddp_loss[1]), int(ddp_loss[2]), - 100. 
* ddp_loss[1] / ddp_loss[2])) + print( + "Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n".format( + test_loss, + int(ddp_loss[1]), + int(ddp_loss[2]), + 100.0 * ddp_loss[1] / ddp_loss[2], + ) + ) return test_loss + def fsdp_main(rank, world_size, test_loss_tensor, lr, epochs, save_model=False): setup(rank, world_size) - transform=transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize((0.1307,), (0.3081,)) - ]) + transform = transforms.Compose( + [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] + ) - dataset1 = datasets.MNIST('./', train=True, download=True, - transform=transform) - dataset2 = datasets.MNIST('./', train=False, - transform=transform) + dataset1 = datasets.MNIST("./", train=True, download=True, transform=transform) + dataset2 = datasets.MNIST("./", train=False, transform=transform) - sampler1 = DistributedSampler(dataset1, rank=rank, num_replicas=world_size, shuffle=True) + sampler1 = DistributedSampler( + dataset1, rank=rank, num_replicas=world_size, shuffle=True + ) sampler2 = DistributedSampler(dataset2, rank=rank, num_replicas=world_size) - train_kwargs = {'batch_size': 64, 'sampler': sampler1} - test_kwargs = {'batch_size': 1000, 'sampler': sampler2} - cuda_kwargs = {'num_workers': 2, - 'pin_memory': True, - 'shuffle': False} + train_kwargs = {"batch_size": 64, "sampler": sampler1} + test_kwargs = {"batch_size": 1000, "sampler": sampler2} + cuda_kwargs = {"num_workers": 2, "pin_memory": True, "shuffle": False} train_kwargs.update(cuda_kwargs) test_kwargs.update(cuda_kwargs) - train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs) + train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs) test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs) my_auto_wrap_policy = functools.partial( size_based_auto_wrap_policy, min_num_params=100 ) torch.cuda.set_device(rank) - init_start_event = torch.cuda.Event(enable_timing=True) init_end_event = 
torch.cuda.Event(enable_timing=True) @@ -163,7 +174,10 @@ def fsdp_main(rank, world_size, test_loss_tensor, lr, epochs, save_model=False): if rank == 0: init_end_event.synchronize() - print(f"CUDA event elapsed time: {init_start_event.elapsed_time(init_end_event) / 1000}sec") + print( + "CUDA event elapsed time:" + f" {init_start_event.elapsed_time(init_end_event) / 1000}sec" + ) if save_model: # use a barrier to make sure training is done on all ranks @@ -173,16 +187,16 @@ def fsdp_main(rank, world_size, test_loss_tensor, lr, epochs, save_model=False): torch.save(states, "mnist_cnn.pt") cleanup() + def evaluate_pipeline(lr=0.1, epoch=20): torch.manual_seed(42) test_loss_tensor = torch.zeros(1) test_loss_tensor.share_memory_() - mp.spawn(fsdp_main, - args=(NUM_GPU, test_loss_tensor, lr, epoch), - nprocs=NUM_GPU, - join=True) + mp.spawn( + fsdp_main, args=(NUM_GPU, test_loss_tensor, lr, epoch), nprocs=NUM_GPU, join=True + ) loss = test_loss_tensor.item() return loss @@ -194,23 +208,13 @@ def evaluate_pipeline(lr=0.1, epoch=20): logging.basicConfig(level=logging.INFO) - pipeline_space = dict( - lr=neps.Float( - lower=0.0001, - upper=0.1, - log=True, - prior=0.01 - ), - epoch=neps.Integer( - lower=1, - upper=3, - is_fidelity=True - ) - ) + class PipelineSpace(neps.Pipeline): + lr = neps.Float(min_value=0.0001, max_value=0.1, log=True, prior=0.01) + epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=3)) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), root_directory="results/pytorch_fsdp", - max_evaluations_total=20 - ) + max_evaluations_total=20, + ) diff --git a/neps_examples/experimental/freeze_thaw.py b/neps_examples/experimental/freeze_thaw.py index 597e1df3d..cd1fc9141 100644 --- a/neps_examples/experimental/freeze_thaw.py +++ b/neps_examples/experimental/freeze_thaw.py @@ -54,14 +54,10 @@ def training_pipeline( KeyError: If the specified optimizer is not supported. 
""" # Transformations applied on each image - transform = transforms.Compose( - [ - transforms.ToTensor(), - transforms.Normalize( - (0.1307,), (0.3081,) - ), # Mean and Std Deviation for MNIST - ] - ) + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)), # Mean and Std Deviation for MNIST + ]) # Loading MNIST dataset dataset = datasets.MNIST( @@ -84,8 +80,7 @@ def training_pipeline( if previous_pipeline_directory is not None: if (Path(previous_pipeline_directory) / "checkpoint.pt").exists(): states = torch.load( - Path(previous_pipeline_directory) / "checkpoint.pt", - weights_only=False + Path(previous_pipeline_directory) / "checkpoint.pt", weights_only=False ) model = states["model"] optimizer = states["optimizer"] @@ -154,16 +149,15 @@ def training_pipeline( if __name__ == "__main__": logging.basicConfig(level=logging.INFO) - pipeline_space = { - "learning_rate": neps.Float(1e-5, 1e-1, log=True), - "num_layers": neps.Integer(1, 5), - "num_neurons": neps.Integer(64, 128), - "weight_decay": neps.Float(1e-5, 0.1, log=True), - "epochs": neps.Integer(1, 10, is_fidelity=True), - } + class PipelineSpace(neps.Pipeline): + learning_rate = neps.Float(1e-5, 1e-1, log=True) + num_layers = neps.Integer(1, 5) + num_neurons = neps.Integer(64, 128) + weight_decay = neps.Float(1e-5, 0.1, log=True) + epochs = neps.Fidelity(neps.Integer(1, 10)) neps.run( - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), evaluate_pipeline=training_pipeline, optimizer="ifbo", max_evaluations_total=50, diff --git a/neps_examples/neps_spaces/pytorch_nn_example.ipynb b/neps_examples/neps_spaces/pytorch_nn_example.ipynb index eaf91acc3..5fc9a6352 100644 --- a/neps_examples/neps_spaces/pytorch_nn_example.ipynb +++ b/neps_examples/neps_spaces/pytorch_nn_example.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 5, + "execution_count": 1, "id": "f3ca063f", "metadata": {}, "outputs": [], @@ -45,7 +45,7 @@ }, { 
"cell_type": "code", - "execution_count": 6, + "execution_count": 54, "id": "4bda71ce", "metadata": {}, "outputs": [], @@ -97,12 +97,13 @@ " model = Operation(\n", " operator=nn.Sequential,\n", " args=Resampled(_model_ARGS),\n", - " )" + " )\n", + " a = 5" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "id": "17005669", "metadata": {}, "outputs": [ @@ -114,18 +115,18 @@ "\n", "Sequential(\n", " (0): Conv2d(3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", - " (1): Identity()\n", - " (2): Conv2d(3, 3, kernel_size=(1, 1), stride=(1, 1))\n", + " (1): Conv2d(3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (2): Identity()\n", ")\n", "\n", "\n", "Config string:\n", "\n", - "( ( {'in_channels': 3, 'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}) () ())\n", + "( ( {'in_channels': 3, 'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}) () ())\n", "\t01 :: \n", "\t\t02 :: {'in_channels': 3, 'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}\n", - "\t\t02 :: \n", - "\t\t02 :: \n" + "\t\t02 :: \n", + "\t\t02 :: \n" ] } ], @@ -151,7 +152,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 4, "id": "9efeb556", "metadata": {}, "outputs": [], @@ -168,7 +169,50 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 73, + "id": "8a18e349", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NePSRandomSearch()\n", + "dict_keys(['model', 'a'])\n", + "\n" + ] + } + ], + "source": [ + "import yaml\n", + "import neps\n", + "from neps.optimizers import load_optimizer\n", + "from pathlib import Path\n", + "\n", + "import neps.space\n", + "import neps.space.neps_spaces\n", + "import neps.space.neps_spaces.neps_space\n", + "nn_space = NN_Space()\n", + "neps_state = neps.state.NePSState.create_or_load(path=Path(\"./results/compat/\"),load_only=True)\n", + "\n", + 
"print(load_optimizer(neps.algorithms.neps_random_search,nn_space)[0])\n", + "\n", + "\n", + "trial1 = neps_state.lock_and_sample_trial(optimizer=load_optimizer(neps.algorithms.neps_random_search,nn_space)[0], worker_id=\"1\")\n", + "import pprint\n", + "config = neps.space.neps_spaces.neps_space.NepsCompatConverter().from_neps_config(trial1.config)\n", + "resolved_pipeline, resolution_context = neps_space.resolve(pipeline=NN_Space(),\n", + " # Predefined samplings are the decisions made at each sampling step\n", + " domain_sampler=neps_space.OnlyPredefinedValuesSampler(predefined_samplings=config.predefined_samplings),\n", + " # Environment values are the fidelities and any arguments of the evaluation function not part of the search space\n", + " environment_values=config.environment_values)\n", + "print(resolved_pipeline.get_attrs().keys())\n", + "print(resolution_context)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, "id": "fa9cabbf", "metadata": {}, "outputs": [ @@ -180,154 +224,18 @@ "\n", " success: 5\n", "\n", - "# Best Found (config 2):\n", + "# Best Found (config 3):\n", "\n", - " objective_to_minimize: -1446.885986328125\n", - " config: {'SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 3, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 4, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 
2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 3, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 3, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[3].resampled_categorical::categorical__3': 0, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[5].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[6].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[7].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 5, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6': 4, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[3].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[4].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[5].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[6].resampled_categorical::categorical__3': 1, 'SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[7].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[2].resampled_categorical::categorical__3': 2, 'SAMPLING__Resolvable.model.args[3].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[4].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[5].resampled_categorical::categorical__3': 0, 'SAMPLING__Resolvable.model.args[6].resampled_categorical::categorical__3': 1, 
'SAMPLING__Resolvable.model.args[7].resampled_categorical::categorical__3': 1}\n", - " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spaces\\results\\neps_spaces_nn_example\\configs\\config_2\n" + " objective_to_minimize: -12986.689453125\n", + " config:\n", + " ( () () ())\n", + " \t01 :: \n", + " \t\t02 :: \n", + " \t\t02 :: \n", + " \t\t02 :: \n", + " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spaces\\results\\neps_spaces_nn_example\\configs\\config_3\n", + "Done.\n" ] - }, - { - "data": { - "text/plain": [ - "( config.SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 \\\n", - " id \n", - " 1 3 \n", - " 2 5 \n", - " 3 4 \n", - " 4 3 \n", - " 5 3 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", - " id \n", - " 1 4 \n", - " 2 1 \n", - " 3 \n", - " 4 2 \n", - " 5 1 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 0 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[1].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 2 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[0].resampled_operation.args[2].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 0 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[1].resampled_operation.args.resampled_categorical::categorical__4 \\\n", - " id \n", - " 1 3 \n", - " 2 \n", - " 3 \n", - " 4 2 \n", - " 5 0 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6 \\\n", - " id \n", - " 1 0 \n", - " 2 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " 
config.SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4 \\\n", - " id \n", - " 1 2 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \\\n", - " id \n", - " 1 2 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " config.SAMPLING__Resolvable.model.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \\\n", - " id \n", - " 1 5 \n", - " 2 \n", - " 3 \n", - " 4 \n", - " 5 \n", - " \n", - " ... reported_as evaluation_duration \\\n", - " id ... \n", - " 1 ... success 0.054059 \n", - " 2 ... success 0.054146 \n", - " 3 ... success 0.000809 \n", - " 4 ... success 0.012381 \n", - " 5 ... success 0.002975 \n", - " \n", - " location state \\\n", - " id \n", - " 1 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", - " 2 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", - " 3 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", - " 4 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... State.SUCCESS \n", - " 5 C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spa... 
State.SUCCESS \n", - " \n", - " sampling_worker_id time_sampled \\\n", - " id \n", - " 1 27288-2025-07-07T20:19:02.309700+00:00 1751919542.338901 \n", - " 2 27288-2025-07-07T20:19:02.309700+00:00 1751919542.423545 \n", - " 3 27288-2025-07-07T20:19:02.309700+00:00 1751919542.504156 \n", - " 4 27288-2025-07-07T20:19:02.309700+00:00 1751919542.529779 \n", - " 5 27288-2025-07-07T20:19:02.309700+00:00 1751919542.566814 \n", - " \n", - " evaluating_worker_id evaluation_duration \\\n", - " id \n", - " 1 27288-2025-07-07T20:19:02.309700+00:00 0.054059 \n", - " 2 27288-2025-07-07T20:19:02.309700+00:00 0.054146 \n", - " 3 27288-2025-07-07T20:19:02.309700+00:00 0.000809 \n", - " 4 27288-2025-07-07T20:19:02.309700+00:00 0.012381 \n", - " 5 27288-2025-07-07T20:19:02.309700+00:00 0.002975 \n", - " \n", - " time_started time_end \n", - " id \n", - " 1 1751919542.340261 1751919542.399379 \n", - " 2 1751919542.424551 1751919542.483701 \n", - " 3 1751919542.505031 1751919542.509259 \n", - " 4 1751919542.530671 1751919542.54665 \n", - " 5 1751919542.567737 1751919542.574195 \n", - " \n", - " [5 rows x 254 columns],\n", - " num_success 5.0\n", - " best_objective_to_minimize -1446.885986\n", - " best_config_id 2\n", - " SAMPLING__Resolvable.model.args.resampled_categorical::categorical__6 5\n", - " SAMPLING__Resolvable.model.args[0].resampled_operation.args.resampled_categorical::categorical__6 1\n", - " ... 
\n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_categorical::categorical__3 \n", - " SAMPLING__Resolvable.model.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[2].resampled_categorical::categorical__3 \n", - " Length: 245, dtype: object)" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" } ], "source": [ @@ -338,19 +246,21 @@ "neps.run(\n", " evaluate_pipeline=evaluate_pipeline,\n", " pipeline_space=pipeline_space,\n", - " optimizer=neps.neps_algorithms.neps_random_search,\n", + " optimizer=neps.algorithms.neps_random_search,\n", " root_directory=\"results/neps_spaces_nn_example\",\n", " post_run_summary=True,\n", " max_evaluations_total=5,\n", " overwrite_working_directory=True,\n", ")\n", - "neps.status(\"results/neps_spaces_nn_example\", print_summary=True)" + "neps.status(\"results/neps_spaces_nn_example\", print_summary=True, pipeline_space_variables=(pipeline_space, [\"model\"]))\n", + "\n", + "print(\"Done.\")" ] } ], 
"metadata": { "kernelspec": { - "display_name": ".venv", + "display_name": "neural-pipeline-search", "language": "python", "name": "python3" }, diff --git a/neps_examples/real_world/image_segmentation_hpo.py b/neps_examples/real_world/image_segmentation_hpo.py index 2320f20f1..cc259b76d 100644 --- a/neps_examples/real_world/image_segmentation_hpo.py +++ b/neps_examples/real_world/image_segmentation_hpo.py @@ -21,27 +21,33 @@ def __init__(self, iters_per_epoch, lr, momentum, weight_decay): def training_step(self, batch): images, targets = batch - outputs = self.model(images)['out'] + outputs = self.model(images)["out"] loss = self.loss_fn(outputs, targets.long().squeeze(1)) self.log("train_loss", loss, sync_dist=True) return loss def validation_step(self, batch): images, targets = batch - outputs = self.model(images)['out'] + outputs = self.model(images)["out"] loss = self.loss_fn(outputs, targets.long().squeeze(1)) self.log("val_loss", loss, sync_dist=True) return loss def configure_optimizers(self): - optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr, momentum=self.momentum, weight_decay=self.weight_decay) + optimizer = torch.optim.SGD( + self.model.parameters(), + lr=self.lr, + momentum=self.momentum, + weight_decay=self.weight_decay, + ) scheduler = PolynomialLR( - optimizer, total_iters=self.iters_per_epoch * self.trainer.max_epochs, power=0.9 + optimizer, + total_iters=self.iters_per_epoch * self.trainer.max_epochs, + power=0.9, ) return [optimizer], [scheduler] - class SegmentationData(L.LightningDataModule): def __init__(self, batch_size=4): super().__init__() @@ -56,29 +62,62 @@ def train_dataloader(self): transform = transforms.Compose([ transforms.ToTensor(), transforms.Resize((256, 256), antialias=True), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) - target_transform = transforms.Compose([transforms.ToTensor(), 
transforms.Resize((256, 256), antialias=True)]) - train_dataset = datasets.VOCSegmentation(root=".data/VOC", transform=transform, target_transform=target_transform) - return torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=16, persistent_workers=True) + target_transform = transforms.Compose( + [transforms.ToTensor(), transforms.Resize((256, 256), antialias=True)] + ) + train_dataset = datasets.VOCSegmentation( + root=".data/VOC", transform=transform, target_transform=target_transform + ) + return torch.utils.data.DataLoader( + train_dataset, + batch_size=self.batch_size, + shuffle=True, + num_workers=16, + persistent_workers=True, + ) def val_dataloader(self): transform = transforms.Compose([ transforms.ToTensor(), transforms.Resize((256, 256), antialias=True), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) - target_transform = transforms.Compose([transforms.ToTensor(), transforms.Resize((256, 256), antialias=True)]) - val_dataset = datasets.VOCSegmentation(root=".data/VOC", year='2012', image_set='val', transform=transform, target_transform=target_transform) - return torch.utils.data.DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=16, persistent_workers=True) + target_transform = transforms.Compose( + [transforms.ToTensor(), transforms.Resize((256, 256), antialias=True)] + ) + val_dataset = datasets.VOCSegmentation( + root=".data/VOC", + year="2012", + image_set="val", + transform=transform, + target_transform=target_transform, + ) + return torch.utils.data.DataLoader( + val_dataset, + batch_size=self.batch_size, + shuffle=False, + num_workers=16, + persistent_workers=True, + ) def evaluate_pipeline(**kwargs): data = SegmentationData(kwargs.get("batch_size", 4)) data.prepare_data() iters_per_epoch = len(data.train_dataloader()) - model = LitSegmentation(iters_per_epoch, 
kwargs.get("lr", 0.02), kwargs.get("momentum", 0.9), kwargs.get("weight_decay", 1e-4)) - trainer = L.Trainer(max_epochs=kwargs.get("epoch", 30), strategy=DDPStrategy(find_unused_parameters=True), enable_checkpointing=False) + model = LitSegmentation( + iters_per_epoch, + kwargs.get("lr", 0.02), + kwargs.get("momentum", 0.9), + kwargs.get("weight_decay", 1e-4), + ) + trainer = L.Trainer( + max_epochs=kwargs.get("epoch", 30), + strategy=DDPStrategy(find_unused_parameters=True), + enable_checkpointing=False, + ) trainer.fit(model, data) val_loss = trainer.logged_metrics["val_loss"].detach().item() return val_loss @@ -92,38 +131,16 @@ def evaluate_pipeline(**kwargs): # Search space for hyperparameters pipeline_space = dict( - lr=neps.Float( - lower=0.0001, - upper=0.1, - log=True, - prior=0.02 - ), - momentum=neps.Float( - lower=0.1, - upper=0.9, - prior=0.5 - ), - weight_decay=neps.Float( - lower=1e-5, - upper=1e-3, - log=True, - prior=1e-4 - ), - epoch=neps.Integer( - lower=10, - upper=30, - is_fidelity=True - ), - batch_size=neps.Integer( - lower=4, - upper=12, - prior=4 - ), + lr=neps.HPOFloat(lower=0.0001, upper=0.1, log=True, prior=0.02), + momentum=neps.HPOFloat(lower=0.1, upper=0.9, prior=0.5), + weight_decay=neps.HPOFloat(lower=1e-5, upper=1e-3, log=True, prior=1e-4), + epoch=neps.HPOInteger(lower=10, upper=30, is_fidelity=True), + batch_size=neps.HPOInteger(lower=4, upper=12, prior=4), ) neps.run( evaluate_pipeline=evaluate_pipeline, pipeline_space=pipeline_space, root_directory="results/hpo_image_segmentation", - max_evaluations_total=500 + max_evaluations_total=500, ) diff --git a/tests/test_config_encoder.py b/tests/test_config_encoder.py index db4a5cee6..276bc566e 100644 --- a/tests/test_config_encoder.py +++ b/tests/test_config_encoder.py @@ -2,14 +2,14 @@ import torch -from neps.space import Categorical, ConfigEncoder, Float, Integer +from neps.space import ConfigEncoder, HPOCategorical, HPOFloat, HPOInteger def test_config_encoder_pdist_calculation() 
-> None: parameters = { - "a": Categorical(["cat", "mouse", "dog"]), - "b": Integer(1, 10), - "c": Float(1, 10), + "a": HPOCategorical(["cat", "mouse", "dog"]), + "b": HPOInteger(1, 10), + "c": HPOFloat(1, 10), } encoder = ConfigEncoder.from_parameters(parameters) config1 = {"a": "cat", "b": 1, "c": 1.0} @@ -43,9 +43,9 @@ def test_config_encoder_pdist_calculation() -> None: def test_config_encoder_pdist_squareform() -> None: parameters = { - "a": Categorical(["cat", "mouse", "dog"]), - "b": Integer(1, 10), - "c": Float(1, 10), + "a": HPOCategorical(["cat", "mouse", "dog"]), + "b": HPOInteger(1, 10), + "c": HPOFloat(1, 10), } encoder = ConfigEncoder.from_parameters(parameters) config1 = {"a": "cat", "b": 1, "c": 1.0} diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index 01d2cec55..263b69724 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -6,7 +6,7 @@ import neps import neps.optimizers -from neps.optimizers import neps_algorithms +import neps.optimizers.algorithms from neps.space.neps_spaces.parameters import ( Categorical, ConfidenceLevel, @@ -155,8 +155,8 @@ class DemoHyperparameterComplexSpace(Pipeline): @pytest.mark.parametrize( "optimizer", [ - neps_algorithms.neps_random_search, - neps_algorithms.neps_complex_random_search, + neps.optimizers.algorithms.neps_random_search, + neps.optimizers.algorithms.neps_complex_random_search, ], ) def test_hyperparameter_demo(optimizer): @@ -178,8 +178,8 @@ def test_hyperparameter_demo(optimizer): @pytest.mark.parametrize( "optimizer", [ - neps_algorithms.neps_random_search, - neps_algorithms.neps_complex_random_search, + neps.optimizers.algorithms.neps_random_search, + neps.optimizers.algorithms.neps_complex_random_search, ], ) def test_hyperparameter_with_fidelity_demo(optimizer): @@ -201,8 +201,8 @@ def test_hyperparameter_with_fidelity_demo(optimizer): @pytest.mark.parametrize( "optimizer", [ - 
neps_algorithms.neps_random_search, - neps_algorithms.neps_complex_random_search, + neps.optimizers.algorithms.neps_random_search, + neps.optimizers.algorithms.neps_complex_random_search, ], ) def test_hyperparameter_complex_demo(optimizer): @@ -326,8 +326,8 @@ class DemoOperationSpace(Pipeline): @pytest.mark.parametrize( "optimizer", [ - neps_algorithms.neps_random_search, - neps_algorithms.neps_complex_random_search, + neps.optimizers.algorithms.neps_random_search, + neps.optimizers.algorithms.neps_complex_random_search, ], ) def test_operation_demo(optimizer): diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index b0c533855..ab014490e 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -6,8 +6,7 @@ import pytest import neps -import neps.optimizers.algorithms as old_algorithms -from neps.optimizers import neps_algorithms +from neps import algorithms from neps.space.neps_spaces.parameters import ( ConfidenceLevel, Fidelity, @@ -34,34 +33,6 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): } -old_pipeline_space = { - "float1": neps.Float( - lower=1, - upper=1000, - log=False, - prior=600, - prior_confidence="medium", - ), - "float2": neps.Float( - lower=-100, - upper=100, - prior=0, - prior_confidence="medium", - ), - "integer1": neps.Integer( - lower=0, - upper=500, - prior=35, - prior_confidence="low", - ), - "fidelity": neps.Integer( - lower=1, - upper=100, - is_fidelity=True, - ), -} - - class DemoHyperparameterWithFidelitySpace(Pipeline): float1 = Float( min_value=1, @@ -94,27 +65,27 @@ class DemoHyperparameterWithFidelitySpace(Pipeline): ("optimizer", "optimizer_name"), [ ( - neps_algorithms.neps_random_search, + algorithms.neps_random_search, "new__RandomSearch", ), ( - neps_algorithms.neps_complex_random_search, + 
algorithms.neps_complex_random_search, "new__ComplexRandomSearch", ), ( - partial(neps_algorithms.neps_priorband, base="successive_halving"), + partial(algorithms.neps_priorband, base="successive_halving"), "new__priorband+successive_halving", ), ( - partial(neps_algorithms.neps_priorband, base="asha"), + partial(algorithms.neps_priorband, base="asha"), "new__priorband+asha", ), ( - partial(neps_algorithms.neps_priorband, base="async_hb"), + partial(algorithms.neps_priorband, base="async_hb"), "new__priorband+async_hb", ), ( - neps_algorithms.neps_priorband, + algorithms.neps_priorband, "new__priorband+hyperband", ), ], @@ -143,26 +114,26 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): ("optimizer", "optimizer_name"), [ ( - partial(old_algorithms.priorband, base="successive_halving"), + partial(algorithms.priorband, base="successive_halving"), "old__priorband+successive_halving", ), ( - partial(old_algorithms.priorband, base="asha"), + partial(algorithms.priorband, base="asha"), "old__priorband+asha", ), ( - partial(old_algorithms.priorband, base="async_hb"), + partial(algorithms.priorband, base="async_hb"), "old__priorband+async_hb", ), ( - old_algorithms.priorband, + algorithms.priorband, "old__priorband+hyperband", ), ], ) def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): optimizer.__name__ = optimizer_name # Needed by NEPS later. - pipeline_space = old_pipeline_space + pipeline_space = DemoHyperparameterWithFidelitySpace() root_directory = f"results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" # Reset the _COSTS global, so they do not get mixed up between tests. 
diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index 00cacc49e..4842eaabd 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -6,8 +6,7 @@ import pytest import neps -import neps.optimizers.algorithms as old_algorithms -from neps.optimizers import neps_algorithms +from neps.optimizers import algorithms from neps.space.neps_spaces.parameters import ( ConfidenceLevel, Fidelity, @@ -21,34 +20,6 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): return -float(np.sum([float1, float2, integer1])) * fidelity -old_pipeline_space = { - "float1": neps.Float( - lower=1, - upper=1000, - log=False, - prior=600, - prior_confidence="medium", - ), - "float2": neps.Float( - lower=-100, - upper=100, - prior=0, - prior_confidence="medium", - ), - "integer1": neps.Integer( - lower=0, - upper=500, - prior=35, - prior_confidence="low", - ), - "fidelity": neps.Integer( - lower=1, - upper=100, - is_fidelity=True, - ), -} - - class DemoHyperparameterWithFidelitySpace(Pipeline): float1 = Float( min_value=1, @@ -81,27 +52,27 @@ class DemoHyperparameterWithFidelitySpace(Pipeline): ("optimizer", "optimizer_name"), [ ( - neps_algorithms.neps_random_search, + algorithms.neps_random_search, "new__RandomSearch", ), ( - neps_algorithms.neps_complex_random_search, + algorithms.neps_complex_random_search, "new__ComplexRandomSearch", ), ( - partial(neps_algorithms.neps_priorband, base="successive_halving"), + partial(algorithms.neps_priorband, base="successive_halving"), "new__priorband+successive_halving", ), ( - partial(neps_algorithms.neps_priorband, base="asha"), + partial(algorithms.neps_priorband, base="asha"), "new__priorband+asha", ), ( - partial(neps_algorithms.neps_priorband, base="async_hb"), + partial(algorithms.neps_priorband, base="async_hb"), "new__priorband+async_hb", 
), ( - neps_algorithms.neps_priorband, + algorithms.neps_priorband, "new__priorband+hyperband", ), ], @@ -127,26 +98,26 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): ("optimizer", "optimizer_name"), [ ( - partial(old_algorithms.priorband, base="successive_halving"), + partial(algorithms.priorband, base="successive_halving"), "old__priorband+successive_halving", ), ( - partial(old_algorithms.priorband, base="asha"), + partial(algorithms.priorband, base="asha"), "old__priorband+asha", ), ( - partial(old_algorithms.priorband, base="async_hb"), + partial(algorithms.priorband, base="async_hb"), "old__priorband+async_hb", ), ( - old_algorithms.priorband, + algorithms.priorband, "old__priorband+hyperband", ), ], ) def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): optimizer.__name__ = optimizer_name # Needed by NEPS later. - pipeline_space = old_pipeline_space + pipeline_space = DemoHyperparameterWithFidelitySpace() root_directory = f"results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" neps.run( diff --git a/tests/test_runtime/test_default_report_values.py b/tests/test_runtime/test_default_report_values.py index 3b37d5254..ab583a3ed 100644 --- a/tests/test_runtime/test_default_report_values.py +++ b/tests/test_runtime/test_default_report_values.py @@ -7,7 +7,7 @@ from neps.optimizers import OptimizerInfo from neps.optimizers.algorithms import random_search from neps.runtime import DefaultWorker -from neps.space import Float, SearchSpace +from neps.space import HPOFloat, SearchSpace from neps.state import ( DefaultReportValues, NePSState, @@ -33,7 +33,7 @@ def neps_state(tmp_path: Path) -> NePSState: def test_default_values_on_error( neps_state: NePSState, ) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, 
default_report_values=DefaultReportValues( @@ -85,7 +85,7 @@ def eval_function(*args, **kwargs) -> float: def test_default_values_on_not_specified( neps_state: NePSState, ) -> None: - optimizer = random_search(SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues( @@ -135,7 +135,7 @@ def eval_function(*args, **kwargs) -> float: def test_default_value_objective_to_minimize_curve_take_objective_to_minimize_value( neps_state: NePSState, ) -> None: - optimizer = random_search(SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues( diff --git a/tests/test_runtime/test_error_handling_strategies.py b/tests/test_runtime/test_error_handling_strategies.py index c1762a02a..38bd973d2 100644 --- a/tests/test_runtime/test_error_handling_strategies.py +++ b/tests/test_runtime/test_error_handling_strategies.py @@ -11,7 +11,7 @@ from neps.optimizers import OptimizerInfo from neps.optimizers.algorithms import random_search from neps.runtime import DefaultWorker -from neps.space import Float, SearchSpace +from neps.space import HPOFloat, SearchSpace from neps.state import ( DefaultReportValues, NePSState, @@ -44,7 +44,7 @@ def test_worker_raises_when_error_in_self( neps_state: NePSState, on_error: OnErrorPossibilities, ) -> None: - optimizer = random_search(SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=on_error, # <- Highlight default_report_values=DefaultReportValues(), @@ -84,7 +84,7 @@ def eval_function(*args, **kwargs) -> float: def test_worker_raises_when_error_in_other_worker(neps_state: NePSState) -> None: - optimizer = random_search(SearchSpace({"a": Float(0, 1)})) + optimizer = 
random_search(SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.RAISE_ANY_ERROR, # <- Highlight default_report_values=DefaultReportValues(), @@ -144,7 +144,7 @@ def test_worker_does_not_raise_when_error_in_other_worker( neps_state: NePSState, on_error: OnErrorPossibilities, ) -> None: - optimizer = random_search(SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=on_error, # <- Highlight default_report_values=DefaultReportValues(), diff --git a/tests/test_runtime/test_stopping_criterion.py b/tests/test_runtime/test_stopping_criterion.py index 08fc3dbf3..5d59cace4 100644 --- a/tests/test_runtime/test_stopping_criterion.py +++ b/tests/test_runtime/test_stopping_criterion.py @@ -8,7 +8,7 @@ from neps.optimizers.algorithms import random_search from neps.optimizers.optimizer import OptimizerInfo from neps.runtime import DefaultWorker -from neps.space import Float, SearchSpace +from neps.space import HPOFloat, SearchSpace from neps.state import ( DefaultReportValues, NePSState, @@ -36,7 +36,7 @@ def neps_state(tmp_path: Path) -> NePSState: def test_max_evaluations_total_stopping_criterion( neps_state: NePSState, ) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -88,7 +88,7 @@ def eval_function(*args, **kwargs) -> float: def test_worker_evaluations_total_stopping_criterion( neps_state: NePSState, ) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -149,7 +149,7 @@ def eval_function(*args, **kwargs) -> 
float: def test_include_in_progress_evaluations_towards_maximum_with_work_eval_count( neps_state: NePSState, ) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -203,7 +203,7 @@ def eval_function(*args, **kwargs) -> float: def test_max_cost_total(neps_state: NePSState) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -251,7 +251,7 @@ def eval_function(*args, **kwargs) -> dict: def test_worker_cost_total(neps_state: NePSState) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -307,7 +307,7 @@ def eval_function(*args, **kwargs) -> dict: def test_worker_wallclock_time(neps_state: NePSState) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -362,7 +362,7 @@ def eval_function(*args, **kwargs) -> float: def test_max_worker_evaluation_time(neps_state: NePSState) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -418,7 +418,7 @@ def eval_function(*args, 
**kwargs) -> float: def test_max_evaluation_time_global(neps_state: NePSState) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": Float(0, 1)})) + optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), diff --git a/tests/test_search_space.py b/tests/test_search_space.py index 73073a0cc..7182463fa 100644 --- a/tests/test_search_space.py +++ b/tests/test_search_space.py @@ -2,12 +2,17 @@ import pytest -from neps import Categorical, Constant, Float, Integer, SearchSpace +from neps.space import SearchSpace +from neps.space.parameters import HPOCategorical, HPOConstant, HPOFloat, HPOInteger def test_search_space_orders_parameters_by_name(): - unsorted = SearchSpace({"b": Float(0, 1), "c": Float(0, 1), "a": Float(0, 1)}) - expected = SearchSpace({"a": Float(0, 1), "b": Float(0, 1), "c": Float(0, 1)}) + unsorted = SearchSpace( + {"b": HPOFloat(0, 1), "c": HPOFloat(0, 1), "a": HPOFloat(0, 1)} + ) + expected = SearchSpace( + {"a": HPOFloat(0, 1), "b": HPOFloat(0, 1), "c": HPOFloat(0, 1)} + ) assert unsorted == expected @@ -15,17 +20,20 @@ def test_multipe_fidelities_raises_error(): # We should allow this at some point, but until we do, raise an error with pytest.raises(ValueError, match="neps only supports one fidelity parameter"): SearchSpace( - {"a": Float(0, 1, is_fidelity=True), "b": Float(0, 1, is_fidelity=True)} + { + "a": HPOFloat(0, 1, is_fidelity=True), + "b": HPOFloat(0, 1, is_fidelity=True), + } ) def test_sorting_of_parameters_into_subsets(): elements = { - "a": Float(0, 1), - "b": Integer(0, 10), - "c": Categorical(["a", "b", "c"]), - "d": Float(0, 1, is_fidelity=True), - "x": Constant("x"), + "a": HPOFloat(0, 1), + "b": HPOInteger(0, 10), + "c": HPOCategorical(["a", "b", "c"]), + "d": HPOFloat(0, 1, is_fidelity=True), + "x": HPOConstant("x"), } space = SearchSpace(elements) assert space.elements == elements diff 
--git a/tests/test_search_space_parsing.py b/tests/test_search_space_parsing.py index 4fd2ea226..b94eb9781 100644 --- a/tests/test_search_space_parsing.py +++ b/tests/test_search_space_parsing.py @@ -4,7 +4,14 @@ import pytest -from neps.space import Categorical, Constant, Float, Integer, Parameter, parsing +from neps.space import ( + HPOCategorical, + HPOConstant, + HPOFloat, + HPOInteger, + Parameter, + parsing, +) @pytest.mark.parametrize( @@ -12,27 +19,27 @@ [ ( (0, 1), - Integer(0, 1), + HPOInteger(0, 1), ), ( ("1e3", "1e5"), - Integer(1e3, 1e5), + HPOInteger(1e3, 1e5), ), ( ("1e-3", "1e-1"), - Float(1e-3, 1e-1), + HPOFloat(1e-3, 1e-1), ), ( (1e-5, 1e-1), - Float(1e-5, 1e-1), + HPOFloat(1e-5, 1e-1), ), ( {"type": "float", "lower": 0.00001, "upper": "1e-1", "log": True}, - Float(0.00001, 0.1, log=True), + HPOFloat(0.00001, 0.1, log=True), ), ( {"type": "int", "lower": 3, "upper": 30, "is_fidelity": True}, - Integer(3, 30, is_fidelity=True), + HPOInteger(3, 30, is_fidelity=True), ), ( { @@ -42,27 +49,27 @@ "log": True, "is_fidelity": False, }, - Integer(100, 30000, log=True, is_fidelity=False), + HPOInteger(100, 30000, log=True, is_fidelity=False), ), ( {"type": "float", "lower": "3.3e-5", "upper": "1.5E-1"}, - Float(3.3e-5, 1.5e-1), + HPOFloat(3.3e-5, 1.5e-1), ), ( {"type": "cat", "choices": [2, "sgd", "10e-3"]}, - Categorical([2, "sgd", 0.01]), + HPOCategorical([2, "sgd", 0.01]), ), ( 0.5, - Constant(0.5), + HPOConstant(0.5), ), ( "1e3", - Constant(1000), + HPOConstant(1000), ), ( {"type": "cat", "choices": ["adam", "sgd", "rmsprop"]}, - Categorical(["adam", "sgd", "rmsprop"]), + HPOCategorical(["adam", "sgd", "rmsprop"]), ), ( { @@ -72,7 +79,7 @@ "prior": 3.3e-2, "prior_confidence": "high", }, - Float(0.00001, 0.1, log=True, prior=3.3e-2, prior_confidence="high"), + HPOFloat(0.00001, 0.1, log=True, prior=3.3e-2, prior_confidence="high"), ), ], ) diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index 98efd5026..0189e38e2 
100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -12,6 +12,7 @@ import pytest from pytest_cases import case, fixture, parametrize, parametrize_with_cases +import neps from neps.optimizers import ( AskFunction, OptimizerInfo, @@ -19,64 +20,61 @@ load_optimizer, ) from neps.optimizers.ask_and_tell import AskAndTell -from neps.space import ( +from neps.space import SearchSpace +from neps.space.neps_spaces.parameters import ( Categorical, - Constant, + Fidelity, Float, Integer, - SearchSpace, + Pipeline, ) from neps.state import BudgetInfo, NePSState, OptimizationState, SeedSnapshot @case -def case_search_space_no_fid() -> SearchSpace: - return SearchSpace( - { - "a": Float(0, 1), - "b": Categorical(["a", "b", "c"]), - "c": Constant("a"), - "d": Integer(0, 10), - } - ) +def case_search_space_no_fid() -> Pipeline: + class Space(Pipeline): + a = Float(0, 1) + b = Categorical(("a", "b", "c")) + c = "a" + d = Integer(0, 10) + + return Space() @case -def case_search_space_with_fid() -> SearchSpace: - return SearchSpace( - { - "a": Float(0, 1), - "b": Categorical(["a", "b", "c"]), - "c": Constant("a"), - "d": Integer(0, 10), - "e": Integer(1, 10, is_fidelity=True), - } - ) +def case_search_space_with_fid() -> Pipeline: + class SpaceFid(Pipeline): + a = Float(0, 1) + b = Categorical(("a", "b", "c")) + c = "a" + d = Integer(0, 10) + e = Fidelity(Integer(1, 10)) + + return SpaceFid() @case -def case_search_space_no_fid_with_prior() -> SearchSpace: - return SearchSpace( - { - "a": Float(0, 1, prior=0.5), - "b": Categorical(["a", "b", "c"], prior="a"), - "c": Constant("a"), - "d": Integer(0, 10, prior=5), - } - ) +def case_search_space_no_fid_with_prior() -> Pipeline: + class SpacePrior(Pipeline): + a = Float(0, 1, prior=0.5) + b = Categorical(("a", "b", "c"), prior=0) + c = "a" + d = Integer(0, 10, prior=5) + + return SpacePrior() @case -def case_search_space_fid_with_prior() -> SearchSpace: - return SearchSpace( - { - "a": Float(0, 
1, prior=0.5), - "b": Categorical(["a", "b", "c"], prior="a"), - "c": Constant("a"), - "d": Integer(0, 10, prior=5), - "e": Integer(1, 10, is_fidelity=True), - } - ) +def case_search_space_fid_with_prior() -> Pipeline: + class SpaceFidPrior(Pipeline): + a = Float(0, 1, prior=0.5) + b = Categorical(("a", "b", "c"), prior=0) + c = "a" + d = Integer(0, 10, prior=5) + e = Fidelity(Integer(1, 10)) + + return SpaceFidPrior() # See issue #121 @@ -98,6 +96,7 @@ def case_search_space_fid_with_prior() -> SearchSpace: "priorband", "moasha", "mo_hyperband", + "neps_priorband", ] NO_DEFAULT_FIDELITY_SUPPORT = [ "random_search", @@ -122,35 +121,54 @@ def case_search_space_fid_with_prior() -> SearchSpace: "priorband", ] +REQUIRES_NEPS_SPACE = [ + "neps_priorband", + "neps_random_search", + "neps_complex_random_search", +] + @fixture @parametrize("key", list(PredefinedOptimizers.keys())) @parametrize_with_cases("search_space", cases=".", prefix="case_search_space") def optimizer_and_key_and_search_space( - key: str, search_space: SearchSpace -) -> tuple[AskFunction, str, SearchSpace]: + key: str, search_space: Pipeline +) -> tuple[AskFunction, str, Pipeline | SearchSpace]: if key in JUST_SKIP: pytest.xfail(f"{key} is not instantiable") if key in NO_DEFAULT_PRIOR_SUPPORT and any( - parameter.prior is not None for parameter in search_space.searchables.values() + parameter.has_prior if hasattr(parameter, "has_prior") else False + for parameter in search_space.get_attrs().values() ): pytest.xfail(f"{key} crashed with a prior") - if search_space.fidelity is not None and key in NO_DEFAULT_FIDELITY_SUPPORT: + if search_space.fidelity_attrs and key in NO_DEFAULT_FIDELITY_SUPPORT: pytest.xfail(f"{key} crashed with a fidelity") - if key in REQUIRES_FIDELITY and search_space.fidelity is None: + if key in REQUIRES_FIDELITY and not search_space.fidelity_attrs: pytest.xfail(f"{key} requires a fidelity parameter") - if key in REQUIRES_PRIOR and all( - parameter.prior is None for parameter in 
search_space.searchables.values() + if key in REQUIRES_PRIOR and not any( + parameter.has_prior if hasattr(parameter, "has_prior") else False + for parameter in search_space.get_attrs().values() ): pytest.xfail(f"{key} requires a prior") kwargs: dict[str, Any] = {} opt, _ = load_optimizer((key, kwargs), search_space) # type: ignore - return opt, key, search_space + converted_space = ( + neps.space.neps_spaces.neps_space.convert_neps_to_classic_search_space( + search_space + ) + ) + return ( + opt, + key, + converted_space + if converted_space and key not in REQUIRES_NEPS_SPACE + else search_space, + ) @parametrize("optimizer_info", [OptimizerInfo(name="blah", info={"a": "b"})]) @@ -177,7 +195,7 @@ def case_neps_state_filebased( @parametrize_with_cases("neps_state", cases=".", prefix="case_neps_state") def test_sample_trial( neps_state: NePSState, - optimizer_and_key_and_search_space: tuple[AskFunction, str, SearchSpace], + optimizer_and_key_and_search_space: tuple[AskFunction, str, Pipeline | SearchSpace], capsys, ) -> None: optimizer, key, search_space = optimizer_and_key_and_search_space @@ -191,8 +209,24 @@ def test_sample_trial( for k, v in trial1.config.items(): assert v is not None, f"'{k}' is None in {trial1.config}" - for name in search_space: - assert name in trial1.config, f"'{name}' is not in {trial1.config}" + if isinstance(search_space, SearchSpace): + for name in search_space: + assert name in trial1.config, f"'{name}' is not in {trial1.config}" + else: + config = neps.space.neps_spaces.neps_space.NepsCompatConverter().from_neps_config( + trial1.config + ) + resolved_pipeline, _ = neps.space.neps_spaces.neps_space.resolve( + pipeline=search_space, + domain_sampler=neps.space.neps_spaces.neps_space.OnlyPredefinedValuesSampler( + predefined_samplings=config.predefined_samplings + ), + environment_values=config.environment_values, + ) + for name in search_space.get_attrs(): + assert name in resolved_pipeline.get_attrs(), ( + f"'{name}' is not in 
{resolved_pipeline.get_attrs()}" + ) # HACK: Unfortunatly due to windows, who's time.time() is not very # precise, we need to introduce a sleep -_- @@ -207,8 +241,24 @@ def test_sample_trial( for k, v in trial1.config.items(): assert v is not None, f"'{k}' is None in {trial1.config}" - for name in search_space: - assert name in trial1.config, f"'{name}' is not in {trial1.config}" + if isinstance(search_space, SearchSpace): + for name in search_space: + assert name in trial1.config, f"'{name}' is not in {trial1.config}" + else: + config = neps.space.neps_spaces.neps_space.NepsCompatConverter().from_neps_config( + trial1.config + ) + resolved_pipeline, _ = neps.space.neps_spaces.neps_space.resolve( + pipeline=search_space, + domain_sampler=neps.space.neps_spaces.neps_space.OnlyPredefinedValuesSampler( + predefined_samplings=config.predefined_samplings + ), + environment_values=config.environment_values, + ) + for name in search_space.get_attrs(): + assert name in resolved_pipeline.get_attrs(), ( + f"'{name}' is not in {resolved_pipeline.get_attrs()}" + ) assert trial1 != trial2 @@ -219,7 +269,7 @@ def test_sample_trial( def test_optimizers_work_roughly( - optimizer_and_key_and_search_space: tuple[AskFunction, str, SearchSpace], + optimizer_and_key_and_search_space: tuple[AskFunction, str, Pipeline | SearchSpace], ) -> None: opt, key, search_space = optimizer_and_key_and_search_space ask_and_tell = AskAndTell(opt) From 6bfbd7c909545379dbefe067fd143374c0e8a6d7 Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 9 Jul 2025 16:36:09 +0200 Subject: [PATCH 027/156] Update documentation references and remove deprecated pipeline_space.md file - Corrected links in getting_started.md, evaluate_pipeline.md, and neps_run.md to point to neps_spaces.md. - Updated navigation in mkdocs.yml to reflect the removal of pipeline_space.md. - Enhanced clarity in algorithms.py and grid.py by updating parameter references. 
- Added validation in parameters.py to ensure prior_confidence is set when prior is defined. --- docs/getting_started.md | 4 +- docs/reference/evaluate_pipeline.md | 2 +- docs/reference/neps_run.md | 8 +- docs/reference/neps_spaces.md | 26 ++- docs/reference/pipeline_space.md | 108 ---------- .../search_algorithms/landing_page_algo.md | 2 +- mkdocs.yml | 3 +- neps/api.py | 200 ++++-------------- neps/optimizers/algorithms.py | 18 +- neps/optimizers/utils/grid.py | 6 +- neps/space/neps_spaces/parameters.py | 24 ++- neps/space/parameters.py | 8 +- 12 files changed, 111 insertions(+), 298 deletions(-) delete mode 100644 docs/reference/pipeline_space.md diff --git a/docs/getting_started.md b/docs/getting_started.md index 84c9237b1..cfec9cf48 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -13,7 +13,7 @@ pip install neural-pipeline-search ## The 3 Main Components -1. **Establish a [`pipeline_space=`](reference/pipeline_space.md)**: +1. **Establish a [`pipeline_space=`](reference/neps_spaces.md)**: ```python pipeline_space={ @@ -52,7 +52,7 @@ neps.run(evaluate_pipeline, pipeline_space) The [reference](reference/neps_run.md) section provides detailed information on the individual components of NePS. 1. How to use the [**`neps.run()`** function](reference/neps_run.md) to start the optimization process. -2. The different [search space](reference/pipeline_space.md) options available. +2. The different [search space](reference/neps_spaces.md) options available. 3. How to choose and configure the [optimizer](reference/optimizers.md) used. 4. How to define the [`evaluate_pipeline()` function](reference/evaluate_pipeline.md). 5. How to [analyze](reference/analyse.md) the optimization runs. 
diff --git a/docs/reference/evaluate_pipeline.md b/docs/reference/evaluate_pipeline.md index f23b6d663..7727dce63 100644 --- a/docs/reference/evaluate_pipeline.md +++ b/docs/reference/evaluate_pipeline.md @@ -12,7 +12,7 @@ We will show some basic usages and some functionalites this function would requi ### 1. Single Value -Assuming the `pipeline_space=` was already created (have a look at [pipeline space](./pipeline_space.md) for more details). +Assuming the `pipeline_space=` was already created (have a look at [pipeline space](./neps_spaces.md) for more details). A `evaluate_pipeline=` function with an objective of minimizing the loss will resemble the following: ```python diff --git a/docs/reference/neps_run.md b/docs/reference/neps_run.md index 5e023253f..3fe30a814 100644 --- a/docs/reference/neps_run.md +++ b/docs/reference/neps_run.md @@ -34,15 +34,15 @@ neps.run( It requires these configurations as input and should return either a dictionary or a sole loss value as the output. 2. This defines the search space for the configurations from which the optimizer samples. It accepts either a dictionary with the configuration names as keys, a path to a YAML configuration file, or a [`configSpace.ConfigurationSpace`](https://automl.github.io/ConfigSpace/) object. - For comprehensive information and examples, please refer to the detailed guide available [here](../reference/pipeline_space.md) + For comprehensive information and examples, please refer to the detailed guide available [here](../reference/neps_spaces.md) 3. The directory path where the information about the optimization and its progress gets stored. This is also used to synchronize multiple calls to `neps.run()` for parallelization. See the following for more: -* What kind of [pipeline space](../reference/pipeline_space.md) can you define? -* What goes in and what goes out of [`evaluate_pipeline()`](../reference/evaluate_pipeline.md)? 
+* What kind of [pipeline space](../reference/neps_spaces.md) can you define? +* What goes in and what goes out of [`evaluate_pipeline()`](../reference/evaluate_pipeline.md)? ## Budget, how long to run? To define a budget, provide `max_evaluations_total=` to [`neps.run()`][neps.api.run], @@ -69,7 +69,7 @@ neps.run( 2. Prevents the initiation of new evaluations once this cost threshold is surpassed. This can be any kind of cost metric you like, such as time, energy, or monetary, as long as you can calculate it. This requires adding a cost value to the output of the `evaluate_pipeline` function, for example, return `#!python {'objective_to_minimize': loss, 'cost': cost}`. - For more details, please refer [here](../reference/evaluate_pipeline.md) + For more details, please refer [here](../reference/evaluate_pipeline.md) ## Getting some feedback, logging NePS will not print anything to the console. To view the progress of workers, diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 3f795dc02..742889f1b 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -11,6 +11,7 @@ NePS Spaces provides a powerful framework for defining and optimizing complex se - [`Categorical`][neps.space.neps_spaces.parameters.Categorical]: Discrete categorical values - [`Fidelity`][neps.space.neps_spaces.parameters.Fidelity]: Special type for float or integer, [multi-fidelity](../reference/search_algorithms/multifidelity.md) parameters (e.g., epochs, dataset size) +Using these types, you can define the parameters that NePS will optimize during the search process. Additionally, **NePS spaces** can describe [complex (hierarchical) architectures](#hierarchies-and-architectures) using: - [`Operation`][neps.space.neps_spaces.parameters.Operation]: Define operations (e.g., convolution, pooling, activation) with arguments @@ -127,11 +128,11 @@ neps.run( !!! 
abstract "NePS Space-compatible optimizers" - Currently, NePS Spaces is compatible with these optimizers, which can be imported from [neps.neps_algorithms][neps.optimizers.neps_algorithms--neps-algorithms]: + Currently, NePS Spaces is compatible with these optimizers, which can be imported from [neps.algorithms][neps.optimizers.algorithms--neps-algorithms]: - - [`Random Search`][neps.optimizers.neps_algorithms.neps_random_search], which can sample the space uniformly at random - - [`Complex Random Search`][neps.optimizers.neps_algorithms.neps_complex_random_search], which can sample the space uniformly at random, using priors and mutating previously sampled configurations - - [`PriorBand`][neps.optimizers.neps_algorithms.neps_priorband], which uses [multi-fidelity](./search_algorithms/multifidelity.md) and the prior knowledge encoded in the NePS space + - [`Random Search`][neps.optimizers.algorithms.neps_random_search], which can sample the space uniformly at random + - [`Complex Random Search`][neps.optimizers.algorithms.neps_complex_random_search], which can sample the space uniformly at random, using priors and mutating previously sampled configurations + - [`PriorBand`][neps.optimizers.algorithms.neps_priorband], which uses [multi-fidelity](./search_algorithms/multifidelity.md) and the prior knowledge encoded in the NePS space ## Inspecting Configurations @@ -156,3 +157,20 @@ resolved_pipeline, resolution_context = neps_space.resolve(pipeline=NN_Space(), # The resolved_pipeline now contains all the parameters and their values, e.g. 
the Callable model model_callable = neps_space.convert_operation_to_callable(operation=resolved_pipeline.model) ``` + +## Using ConfigSpace + +Users familiar with the [`ConfigSpace`](https://automl.github.io/ConfigSpace/main/) library +can also define the `pipeline_space` through `ConfigurationSpace()` + +```python +from ConfigSpace import ConfigurationSpace, Float + +configspace = ConfigurationSpace( + { + "learning_rate": Float("learning_rate", bounds=(1e-4, 1e-1), log=True), + "optimizer": ["adam", "sgd", "rmsprop"], + "dropout_rate": 0.5, + } +) +``` diff --git a/docs/reference/pipeline_space.md b/docs/reference/pipeline_space.md deleted file mode 100644 index 9844e42a3..000000000 --- a/docs/reference/pipeline_space.md +++ /dev/null @@ -1,108 +0,0 @@ -# Initializing the Pipeline Space - -In NePS, we need to define a `pipeline_space`. -This space can be structured through various approaches, including a Python dictionary, or ConfigSpace. -Each of these methods allows you to specify a set of parameter types, ranging from Float and Categorical to specialized architecture parameters. -Whether you choose a dictionary, or ConfigSpace, your selected method serves as a container or framework -within which these parameters are defined and organized. This section not only guides you through the process of -setting up your `pipeline_space` using these methods but also provides detailed instructions and examples on how to -effectively incorporate various parameter types, ensuring that NePS can utilize them in the optimization process. - - -## Parameters -NePS currently features 4 primary hyperparameter types: - -* [`Categorical`][neps.space.Categorical] -* [`Float`][neps.space.Float] -* [`Integer`][neps.space.Integer] -* [`Constant`][neps.space.Constant] - -Using these types, you can define the parameters that NePS will optimize during the search process. 
-The most basic way to pass these parameters is through a Python dictionary, where each key-value -pair represents a parameter name and its respective type. -For example, the following Python dictionary defines a `pipeline_space` with four parameters -for optimizing a deep learning model: - -```python -pipeline_space = { - "learning_rate": neps.Float(0.00001, 0.1, log=True), - "num_epochs": neps.Integer(3, 30, is_fidelity=True), - "optimizer": ["adam", "sgd", "rmsprop"], # Categorical - "dropout_rate": 0.5, # Constant -} - -neps.run(.., pipeline_space=pipeline_space) -``` - -??? example "Quick Parameter Reference" - - === "`Categorical`" - - ::: neps.space.Categorical - - === "`Float`" - - ::: neps.space.Float - - === "`Integer`" - - ::: neps.space.Integer - - === "`Constant`" - - ::: neps.space.Constant - - -## Using your knowledge, providing a Prior -When optimizing, you can provide your own knowledge using the parameter `prior=`. -By indicating a `prior=` we take this to be your user prior, -**your knowledge about where a good value for this parameter lies**. - -You can also specify a `prior_confidence=` to indicate how strongly you want NePS, -to focus on these, one of either `"low"`, `"medium"`, or `"high"`. - -```python -import neps - -neps.run( - ..., - pipeline_space={ - "learning_rate": neps.Float(1e-4, 1e-1, log=True, prior=1e-2, prior_confidence="medium"), - "num_epochs": neps.Integer(3, 30, is_fidelity=True), - "optimizer": neps.Categorical(["adam", "sgd", "rmsprop"], prior="adam", prior_confidence="low"), - "dropout_rate": neps.Constant(0.5), - } -) -``` - -!!! warning "Interaction with `is_fidelity`" - - If you specify `is_fidelity=True` and `prior=` for one parameter, this will raise an error. - -Currently the two major algorithms that exploit this in NePS are `PriorBand` -(prior-based `HyperBand`) and `PiBO`, a version of Bayesian Optimization which uses Priors. 
For more information on priors and algorithms using them, please refer to the [prior documentation](../reference/search_algorithms/prior.md). - -## Using ConfigSpace - -For users familiar with the [`ConfigSpace`](https://automl.github.io/ConfigSpace/main/) library, -can also define the `pipeline_space` through `ConfigurationSpace()` - -```python -from configspace import ConfigurationSpace, Float - -configspace = ConfigurationSpace( - { - "learning_rate": Float("learning_rate", bounds=(1e-4, 1e-1), log=True) - "optimizer": ["adam", "sgd", "rmsprop"], - "dropout_rate": 0.5, - } -) -``` - -!!! warning - - Parameters you wish to use as a **fidelity** are not support through ConfigSpace - at this time. - -For additional information on ConfigSpace and its features, please visit the following -[link](https://github.com/automl/ConfigSpace). diff --git a/docs/reference/search_algorithms/landing_page_algo.md b/docs/reference/search_algorithms/landing_page_algo.md index 7f7be891e..36e5558e1 100644 --- a/docs/reference/search_algorithms/landing_page_algo.md +++ b/docs/reference/search_algorithms/landing_page_algo.md @@ -36,7 +36,7 @@ We present a collection of MF-algorithms [here](./multifidelity.md) and algorith ## What are Priors? -Priors are used when there exists some information about the search space, that can be used to guide the optimization process. This information could come from expert domain knowledge or previous experiments. A Prior is provided in the form of a distribution over one dimension of the search space, with a `mean` (the suspected optimum) and a `confidence level`, or `variance`. We discuss how Priors can be included in your NePS-search space [here](../../reference/pipeline_space.md#using-your-knowledge-providing-a-prior). +Priors are used when there exists some information about the search space, that can be used to guide the optimization process. This information could come from expert domain knowledge or previous experiments. 
A Prior is provided in the form of a distribution over one dimension of the search space, with a `mean` (the suspected optimum) and a `confidence level`, or `variance`. We discuss how Priors can be included in your NePS-search space [here](../../reference/neps_spaces.md#hpo-search-spaces). !!! tip "Advantages of using Priors" diff --git a/mkdocs.yml b/mkdocs.yml index efb481a56..1218edc6a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -139,8 +139,7 @@ nav: - Getting Started: 'getting_started.md' - Reference: - Run: 'reference/neps_run.md' - - Search Space: 'reference/pipeline_space.md' - - NePS-Spaces: 'reference/neps_spaces.md' + - NePS Spaces: 'reference/neps_spaces.md' - The Evaluate Function: 'reference/evaluate_pipeline.md' - Analysing Runs: 'reference/analyse.md' - Optimizer: 'reference/optimizers.md' diff --git a/neps/api.py b/neps/api.py index 4e3b84ed0..ee56aca03 100644 --- a/neps/api.py +++ b/neps/api.py @@ -77,30 +77,29 @@ def evaluate_pipeline(some_parameter: float) -> float: validation_error = -some_parameter return validation_error - pipeline_space = dict(some_parameter=neps.Float(lower=0, upper=1)) + class PipelineSpace(Pipeline): + dataset = "mnist" # constant + nlayers = neps.Integer(2,10) # integer + alpha = neps.Float(0.1, 1.0) # float + optimizer = neps.Categorical( # categorical + ("adam", "sgd", "rmsprop") + ) + learning_rate = neps.Float( # log spaced float + min_value=1e-5, max_value=1, log=True + ) + epochs = neps.Fidelity( # fidelity integer + neps.Integer(1, 100) + ) + batch_size = neps.Integer( # integer with a prior + min_value=32, + max_value=512, + prior=128, + prior_confidence="medium" + ) + neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space={ - "some_parameter": (0.0, 1.0), # float - "another_parameter": (0, 10), # integer - "optimizer": ["sgd", "adam"], # categorical - "epoch": neps.Integer( # fidelity integer - lower=1, - upper=100, - is_fidelity=True - ), - "learning_rate": neps.Float( # log spaced float - lower=1e-5, - 
uperr=1, - log=True - ), - "alpha": neps.Float( # float with a prior - lower=0.1, - upper=1.0, - prior=0.99, - prior_confidence="high", - ) - }, + pipeline_space=PipelineSpace(), root_directory="usage_example", max_evaluations_total=5, ) @@ -134,25 +133,28 @@ def evaluate_pipeline(some_parameter: float) -> float: This most direct way to specify the search space is as follows: ```python - neps.run( - pipeline_space={ - "dataset": "mnist", # constant - "nlayers": (2, 10), # integer - "alpha": (0.1, 1.0), # float - "optimizer": [ # categorical - "adam", "sgd", "rmsprop" - ], - "learning_rate": neps.Float(, # log spaced float - lower=1e-5, upper=1, log=True - ), - "epochs": neps.Integer( # fidelity integer - lower=1, upper=100, is_fidelity=True - ), - "batch_size": neps.Integer( # integer with a prior - lower=32, upper=512, prior=128 - ), + class PipelineSpace(Pipeline): + dataset = "mnist" # constant + nlayers = neps.Integer(2,10) # integer + alpha = neps.Float(0.1, 1.0) # float + optimizer = neps.Categorical( # categorical + ("adam", "sgd", "rmsprop") + ) + learning_rate = neps.Float( # log spaced float + min_value=1e-5, max_value=1, log=True + ) + epochs = neps.Fidelity( # fidelity integer + neps.Integer(1, 100) + ) + batch_size = neps.Integer( # integer with a prior + min_value=32, + max_value=512, + prior=128, + prior_confidence="medium" + ) - } + neps.run( + pipeline_space=PipelineSpace() ) ``` @@ -164,29 +166,8 @@ def evaluate_pipeline(some_parameter: float) -> float: * `prior=`: If you have a good idea about what a good setting for a parameter may be, you can set this as the prior for - a parameter. You can specify this along with `prior_confidence` - if you would like to assign a `"low"`, `"medium"`, or `"high"` - confidence to the prior. - - - !!! 
note "Yaml support" - - To support spaces defined in yaml, you may also define the parameters - as dictionarys, e.g., - - ```python - neps.run( - pipeline_space={ - "dataset": "mnist", - "nlayers": {"type": "int", "lower": 2, "upper": 10}, - "alpha": {"type": "float", "lower": 0.1, "upper": 1.0}, - "optimizer": {"type": "cat", "choices": ["adam", "sgd", "rmsprop"]}, - "learning_rate": {"type": "float", "lower": 1e-5, "upper": 1, "log": True}, - "epochs": {"type": "int", "lower": 1, "upper": 100, "is_fidelity": True}, - "batch_size": {"type": "int", "lower": 32, "upper": 512, "prior": 128}, - } - ) - ``` + a parameter. You specify this along with `prior_confidence` + to assign a `"low"`, `"medium"`, or `"high"`confidence to the prior. !!! note "ConfigSpace support" @@ -256,98 +237,7 @@ def evaluate_pipeline(some_parameter: float) -> float: ??? note "Available optimizers" - --- - - * `#!python "bayesian_optimization"`, - - ::: neps.optimizers.algorithms.bayesian_optimization - options: - show_root_heading: false - show_signature: false - show_source: false - - --- - - * `#!python "ifbo"` - - ::: neps.optimizers.algorithms.ifbo - options: - show_root_heading: false - show_signature: false - show_source: false - - --- - - * `#!python "successive_halving"`: - - ::: neps.optimizers.algorithms.successive_halving - options: - show_root_heading: false - show_signature: false - show_source: false - - --- - - * `#!python "hyperband"`: - - ::: neps.optimizers.algorithms.hyperband - options: - show_root_heading: false - show_signature: false - show_source: false - - --- - - * `#!python "priorband"`: - - ::: neps.optimizers.algorithms.priorband - options: - show_root_heading: false - show_signature: false - show_source: false - - --- - - * `#!python "asha"`: - - ::: neps.optimizers.algorithms.asha - options: - show_root_heading: false - show_signature: false - show_source: false - - --- - - * `#!python "async_hb"`: - - ::: neps.optimizers.algorithms.async_hb - options: - 
show_root_heading: false - show_signature: false - show_source: false - - --- - - * `#!python "random_search"`: - - ::: neps.optimizers.algorithms.random_search - options: - show_root_heading: false - show_signature: false - show_source: false - - --- - - * `#!python "grid_search"`: - - ::: neps.optimizers.algorithms.grid_search - options: - show_root_heading: false - show_signature: false - show_source: false - - --- - + See the [optimizers documentation](../../reference/search_algorithms/landing_page_algo.md) for a list of available optimizers. With any optimizer choice, you also may provide some additional parameters to the optimizers. We do not recommend this unless you are familiar with the optimizer you are using. You diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 0cf751d96..5d0195ac6 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -1,4 +1,6 @@ -"""The selection of optimization algorithms available in NePS. +"""NePS Algorithms +=========== +The selection of optimization algorithms available in NePS. This module conveniently starts with 'a' to be at the top and is where most of the code documentation for optimizers can be found. @@ -722,7 +724,7 @@ def successive_halving( or `#!python sampler="prior"`. Args: - space: The search space to sample from. + pipeline_space: The search space to sample from. eta: The reduction factor used for building brackets early_stopping_rate: Determines the number of rungs in a bracket Choosing 0 creates maximal rungs given the fidelity bounds. @@ -797,7 +799,7 @@ def hyperband( as this algorithm could be considered an extension of it. Args: - space: The search space to sample from. + pipeline_space: The search space to sample from. eta: The reduction factor used for building brackets sampler: The type of sampling procedure to use: @@ -904,7 +906,7 @@ def asha( as this algorithm could be considered an extension of it. 
Args: - space: The search space to sample from. + pipeline_space: The search space to sample from. eta: The reduction factor used for building brackets sampler: The type of sampling procedure to use: @@ -1007,7 +1009,7 @@ def async_hb( takes elements from each. Args: - space: The search space to sample from. + pipeline_space: The search space to sample from. eta: The reduction factor used for building brackets sampler: The type of sampling procedure to use: @@ -1078,7 +1080,7 @@ def priorband( See: https://openreview.net/forum?id=uoiwugtpCH¬eId=xECpK2WH6k Args: - space: The search space to sample from. + pipeline_space: The search space to sample from. eta: The reduction factor used for building brackets sample_prior_first: Whether to sample the prior configuration first. base: The base algorithm to use for the bracketing. @@ -1150,7 +1152,7 @@ def bayesian_optimization( acquisition function. Args: - space: The search space to sample from. + pipeline_space: The search space to sample from. initial_design_size: Number of samples used before using the surrogate model. If "ndim", it will use the number of parameters in the search space. cost_aware: Whether to consider reported "cost" from configurations in decision @@ -1241,7 +1243,7 @@ def pibo( has. Args: - space: The search space to sample from. + pipeline_space: The search space to sample from. initial_design_size: Number of samples used before using the surrogate model. If "ndim", it will use the number of parameters in the search space. cost_aware: Whether to consider reported "cost" from configurations in decision diff --git a/neps/optimizers/utils/grid.py b/neps/optimizers/utils/grid.py index b6ba5ff0c..aa152c66c 100644 --- a/neps/optimizers/utils/grid.py +++ b/neps/optimizers/utils/grid.py @@ -23,13 +23,13 @@ def make_grid( ) -> list[dict[str, Any]]: """Get a grid of configurations from the search space. 
- For [`Float`][neps.space.Float] and [`Integer`][neps.space.Integer] + For [`Float`][neps.space.HPOFloat] and [`Integer`][neps.space.HPOInteger] the parameter `size_per_numerical_hp=` is used to determine a grid. - For [`Categorical`][neps.space.Categorical] + For [`Categorical`][neps.space.HPOCategorical] hyperparameters, we include all the choices in the grid. - For [`Constant`][neps.space.Constant] hyperparameters, + For [`Constant`][neps.space.HPOConstant] hyperparameters, we include the constant value in the grid. Args: diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 1bc96b570..d937c7625 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -428,6 +428,10 @@ def __init__( if isinstance(prior_confidence, str) else prior_confidence ) + if self._prior is not _UNSET and self._prior_confidence is _UNSET: + raise ValueError( + "If prior is set, prior_confidence must also be set to a valid value." + ) @property def min_value(self) -> int: @@ -481,7 +485,7 @@ def prior(self) -> int: """ if not self.has_prior: - raise ValueError("Domain has no prior defined.") + raise ValueError("Domain has no prior and prior_confidence defined.") return int(cast(int, self._prior)) @property @@ -496,7 +500,7 @@ def prior_confidence(self) -> ConfidenceLevel: """ if not self.has_prior: - raise ValueError("Domain has no prior defined.") + raise ValueError("Domain has no prior and prior_confidence defined.") return cast(ConfidenceLevel, self._prior_confidence) @property @@ -600,6 +604,10 @@ def __init__( if isinstance(prior_confidence, str) else prior_confidence ) + if self._prior is not _UNSET and self._prior_confidence is _UNSET: + raise ValueError( + "If prior is set, prior_confidence must also be set to a valid value." 
) @property def min_value(self) -> float: @@ -649,7 +657,7 @@ def prior(self) -> float: """ if not self.has_prior: - raise ValueError("Domain has no prior defined.") + raise ValueError("Domain has no prior and prior_confidence defined.") return float(cast(float, self._prior)) @property @@ -664,7 +672,7 @@ def prior_confidence(self) -> ConfidenceLevel: """ if not self.has_prior: - raise ValueError("Domain has no prior defined.") + raise ValueError("Domain has no prior and prior_confidence defined.") return cast(ConfidenceLevel, self._prior_confidence) @property @@ -770,6 +778,10 @@ def __init__( if isinstance(prior_confidence, str) else prior_confidence ) + if self._prior is not _UNSET and self._prior_confidence is _UNSET: + raise ValueError( + "If prior is set, prior_confidence must also be set to a valid value." + ) @property def min_value(self) -> int: @@ -819,7 +831,7 @@ def prior(self) -> int: """ if not self.has_prior: - raise ValueError("Domain has no prior defined.") + raise ValueError("Domain has no prior and prior_confidence defined.") return int(cast(int, self._prior)) @property @@ -834,7 +846,7 @@ def prior_confidence(self) -> ConfidenceLevel: """ if not self.has_prior: - raise ValueError("Domain has no prior defined.") + raise ValueError("Domain has no prior and prior_confidence defined.") return cast(ConfidenceLevel, self._prior_confidence) @property diff --git a/neps/space/parameters.py b/neps/space/parameters.py index b7051a9a2..723b1fd38 100644 --- a/neps/space/parameters.py +++ b/neps/space/parameters.py @@ -287,9 +287,9 @@ def center(self) -> Any: Parameter: TypeAlias = HPOFloat | HPOInteger | HPOCategorical """A type alias for all the parameter types. 
-* [`Float`][neps.space.Float] -* [`Integer`][neps.space.Integer] -* [`Categorical`][neps.space.Categorical] +* [`Float`][neps.space.HPOFloat] +* [`Integer`][neps.space.HPOInteger] +* [`Categorical`][neps.space.HPOCategorical] -A [`Constant`][neps.space.Constant] is not included as it does not change value. +A [`Constant`][neps.space.HPOConstant] is not included as it does not change value. """ From 846b45d03d691adf165493be4745cafdfc48af1d Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 9 Jul 2025 17:05:49 +0200 Subject: [PATCH 028/156] Refactor NePS examples and documentation to enhance clarity and usability, including the addition of a PyTorch neural network example and removal of outdated notebook. --- docs/getting_started.md | 19 +- docs/index.md | 21 +- docs/reference/neps_spaces.md | 4 +- docs/reference/optimizers.md | 25 +- .../search_algorithms/landing_page_algo.md | 25 +- neps_examples/__init__.py | 1 + neps_examples/basic_usage/hyperparameters.py | 6 - .../basic_usage/pytorch_nn_example.py | 127 ++++++++ .../neps_spaces/pytorch_nn_example.ipynb | 282 ------------------ 9 files changed, 172 insertions(+), 338 deletions(-) create mode 100644 neps_examples/basic_usage/pytorch_nn_example.py delete mode 100644 neps_examples/neps_spaces/pytorch_nn_example.ipynb diff --git a/docs/getting_started.md b/docs/getting_started.md index cfec9cf48..c9e763f2b 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -16,15 +16,14 @@ pip install neural-pipeline-search 1. 
**Establish a [`pipeline_space=`](reference/neps_spaces.md)**: ```python -pipeline_space={ - "some_parameter": (0.0, 1.0), # float - "another_parameter": (0, 10), # integer - "optimizer": ["sgd", "adam"], # categorical - "epoch": neps.Integer(lower=1, upper=100, is_fidelity=True), - "learning_rate": neps.Float(lower=1e-5, upper=1, log=True), - "alpha": neps.Float(lower=0.1, upper=1.0, prior=0.99, prior_confidence="high") -} - +class PipelineSpace(neps.Pipeline): + # Define the parameters of your search space + some_parameter = neps.Float(min_value=0.0, max_value=1.0) # float + another_parameter = neps.Integer(min_value=0, max_value=10) # integer + optimizer = neps.Categorical(choices=("sgd", "adam")) # categorical + epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=100)) + learning_rate = neps.Float(min_value=1e-5, max_value=1, log=True) + alpha = neps.Float(min_value=0.1, max_value=1.0, prior=0.99, prior_confidence="high") ``` 2. **Define an `evaluate_pipeline()` function**: @@ -42,7 +41,7 @@ def evaluate_pipeline(some_parameter: float, 3. **Execute with [`neps.run()`](reference/neps_run.md)**: ```python -neps.run(evaluate_pipeline, pipeline_space) +neps.run(evaluate_pipeline, PipelineSpace()) ``` --- diff --git a/docs/index.md b/docs/index.md index d6e618e19..4ed7f5618 100644 --- a/docs/index.md +++ b/docs/index.md @@ -59,33 +59,26 @@ import logging # 1. Define a function that accepts hyperparameters and computes the validation error -def evaluate_pipeline( - hyperparameter_a: float, hyperparameter_b: int, architecture_parameter: str -) -> dict: +def evaluate_pipeline(hyperparameter_a: float, hyperparameter_b: int, architecture_parameter: str): # Create your model model = MyModel(architecture_parameter) # Train and evaluate the model with your training pipeline - validation_error = train_and_eval( - model, hyperparameter_a, hyperparameter_b - ) + validation_error = train_and_eval(model, hyperparameter_a, hyperparameter_b) return validation_error # 2. 
Define a search space of parameters; use the same parameter names as in evaluate_pipeline -pipeline_space = dict( - hyperparameter_a=neps.Float( - lower=0.001, upper=0.1, log=True # The search space is sampled in log space - ), - hyperparameter_b=neps.Integer(lower=1, upper=42), - architecture_parameter=neps.Categorical(["option_a", "option_b"]), -) +class PipelineSpace(neps.Pipeline): + hyperparameter_a = neps.Float(min_value=0.001, max_value=0.1, log=True) # Log scale parameter + hyperparameter_b = neps.Integer(min_value=1, max_value=42) + architecture_parameter = neps.Categorical(choices=("option_a", "option_b")) # 3. Run the NePS optimization logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=PipelineSpace(), root_directory="path/to/save/results", # Replace with the actual path. max_evaluations_total=100, ) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 742889f1b..2a988cebe 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -22,7 +22,7 @@ Additionally, **NePS spaces** can describe [complex (hierarchical) architectures A **NePS space** is defined as a subclass of [`Pipeline`][neps.space.neps_spaces.parameters.Pipeline]: ```python -from neps.space.neps_spaces.parameters import Pipeline, Float, Integer, Categorical, Fidelity, Resampled, Operation +from neps import Pipeline, Float, Integer, Categorical, Fidelity, Resampled, Operation class pipeline_space(Pipeline): ``` @@ -33,7 +33,7 @@ Here we define the hyperparameters that make up the space, like so: float_param = Float(min_value=0.1, max_value=1.0) int_param = Integer(min_value=1, max_value=10) - cat_param = Categorical(choices=["A", "B", "C"]) + cat_param = Categorical(choices=("A", "B", "C")) epochs = Fidelity(Integer(1, 16)) ``` diff --git a/docs/reference/optimizers.md b/docs/reference/optimizers.md index 3c6502624..5aa68d1ce 100644 --- 
a/docs/reference/optimizers.md +++ b/docs/reference/optimizers.md @@ -42,18 +42,19 @@ NePS provides a multitude of optimizers from the literature, the [algorithms](.. ✅ = supported/necessary, ❌ = not supported, ✔️* = optional, click for details, ✖️\* ignorable, click for details -| Algorithm | [Multi-Fidelity](../reference/search_algorithms/multifidelity.md) | [Priors](../reference/search_algorithms/prior.md) | Model-based | -| :- | :------------: | :----: | :---------: | -| `Grid Search`|[️️✖️*][neps.optimizers.algorithms.grid_search]|❌|❌| -| `Random Search`|[️️✖️*][neps.optimizers.algorithms.random_search]|[✔️*][neps.optimizers.algorithms.random_search]|❌| -| [`Bayesian Optimization`](../reference/search_algorithms/bayesian_optimization.md)|[️️✖️*][neps.optimizers.algorithms.bayesian_optimization]|❌|✅| -| [`Successive Halving`](../reference/search_algorithms/multifidelity.md#1-successive-halfing)|✅|[✔️*][neps.optimizers.algorithms.successive_halving]|❌| -| [`ASHA`](../reference/search_algorithms/multifidelity.md#asynchronous-successive-halving)|✅|[✔️*][neps.optimizers.algorithms.asha]|❌| -| [`Hyperband`](../reference/search_algorithms/multifidelity.md#2-hyperband)|✅|[✔️*][neps.optimizers.algorithms.hyperband]|❌| -| [`Asynch HB`](../reference/search_algorithms/multifidelity.md)|✅|[✔️*][neps.optimizers.algorithms.async_hb]|❌| -| [`IfBO`](../reference/search_algorithms/multifidelity.md#3-in-context-freeze-thaw-bayesian-optimization)|✅|[✔️*][neps.optimizers.algorithms.ifbo]|✅| -| [`PiBO`](../reference/search_algorithms/prior.md#1-pibo)|[️️✖️*][neps.optimizers.algorithms.pibo]|✅|✅| -| [`PriorBand`](../reference/search_algorithms/multifidelity_prior.md#1-priorband)|✅|✅|✅| +| Algorithm | [Multi-Fidelity](../reference/search_algorithms/multifidelity.md) | [Priors](../reference/search_algorithms/prior.md) | Model-based | [NePS-ready](../reference/neps_spaces.md#hierarchies-and-architectures) | +| :- | :------------: | :----: | :---------: | :-----------------: | +| `Grid 
Search`|[️️✖️*][neps.optimizers.algorithms.grid_search]|❌|❌|❌| +| `Random Search`|[️️✖️*][neps.optimizers.algorithms.random_search]|[✔️*][neps.optimizers.algorithms.random_search]|❌|✅| +| `Complex Random Search`|[️️✖️*][neps.optimizers.algorithms.neps_complex_random_search]|[✔️*][neps.optimizers.algorithms.neps_complex_random_search]|❌|✅| +| [`Bayesian Optimization`](../reference/search_algorithms/bayesian_optimization.md)|[️️✖️*][neps.optimizers.algorithms.bayesian_optimization]|❌|✅|❌| +| [`Successive Halving`](../reference/search_algorithms/multifidelity.md#1-successive-halfing)|✅|[✔️*][neps.optimizers.algorithms.successive_halving]|❌|❌| +| [`ASHA`](../reference/search_algorithms/multifidelity.md#asynchronous-successive-halving)|✅|[✔️*][neps.optimizers.algorithms.asha]|❌|❌| +| [`Hyperband`](../reference/search_algorithms/multifidelity.md#2-hyperband)|✅|[✔️*][neps.optimizers.algorithms.hyperband]|❌|❌| +| [`Asynch HB`](../reference/search_algorithms/multifidelity.md)|✅|[✔️*][neps.optimizers.algorithms.async_hb]|❌|❌| +| [`IfBO`](../reference/search_algorithms/multifidelity.md#3-in-context-freeze-thaw-bayesian-optimization)|✅|[✔️*][neps.optimizers.algorithms.ifbo]|✅|❌| +| [`PiBO`](../reference/search_algorithms/prior.md#1-pibo)|[️️✖️*][neps.optimizers.algorithms.pibo]|✅|✅|❌| +| [`PriorBand`](../reference/search_algorithms/multifidelity_prior.md#1-priorband)|✅|✅|✅|✅| If you prefer not to specify a particular optimizer for your AutoML task, you can simply pass `"auto"` or `None` for the neps optimizer. 
This provides a hassle-free way to get started quickly, as NePS will automatically choose the best optimizer based on the characteristics of your search diff --git a/docs/reference/search_algorithms/landing_page_algo.md b/docs/reference/search_algorithms/landing_page_algo.md index 36e5558e1..7d80d6090 100644 --- a/docs/reference/search_algorithms/landing_page_algo.md +++ b/docs/reference/search_algorithms/landing_page_algo.md @@ -6,18 +6,19 @@ We distinguish between algorithms that use different types of information and st ✅ = supported/necessary, ❌ = not supported, ✔️* = optional, click for details, ✖️\* ignorable, click for details -| Algorithm | [Multi-Fidelity](../search_algorithms/multifidelity.md) | [Priors](../search_algorithms/prior.md) | Model-based | -| :- | :------------: | :----: | :---------: | -| `Grid Search`|[️️✖️*][neps.optimizers.algorithms.grid_search]|❌|❌| -| `Random Search`|[️️✖️*][neps.optimizers.algorithms.random_search]|[✔️*][neps.optimizers.algorithms.random_search]|❌| -| [`Bayesian Optimization`](../search_algorithms/bayesian_optimization.md)|[️️✖️*][neps.optimizers.algorithms.bayesian_optimization]|❌|✅| -| [`Successive Halving`](../search_algorithms/multifidelity.md#1-successive-halfing)|✅|[✔️*][neps.optimizers.algorithms.successive_halving]|❌| -| [`ASHA`](../search_algorithms/multifidelity.md#asynchronous-successive-halving)|✅|[✔️*][neps.optimizers.algorithms.asha]|❌| -| [`Hyperband`](../search_algorithms/multifidelity.md#2-hyperband)|✅|[✔️*][neps.optimizers.algorithms.hyperband]|❌| -| [`Asynch HB`](../search_algorithms/multifidelity.md)|✅|[✔️*][neps.optimizers.algorithms.async_hb]|❌| -| [`IfBO`](../search_algorithms/multifidelity.md#3-in-context-freeze-thaw-bayesian-optimization)|✅|[✔️*][neps.optimizers.algorithms.ifbo]|✅| -| [`PiBO`](../search_algorithms/prior.md#1-pibo)|[️️✖️*][neps.optimizers.algorithms.pibo]|✅|✅| -| [`PriorBand`](../search_algorithms/multifidelity_prior.md#1-priorband)|✅|✅|✅| +| Algorithm | 
[Multi-Fidelity](../search_algorithms/multifidelity.md) | [Priors](../search_algorithms/prior.md) | Model-based | [NePS-ready](../neps_spaces.md#hierarchies-and-architectures) | +| :- | :------------: | :----: | :---------: | :-----------------: | +| `Grid Search`|[️️✖️*][neps.optimizers.algorithms.grid_search]|❌|❌|❌| +| `Random Search`|[️️✖️*][neps.optimizers.algorithms.random_search]|[✔️*][neps.optimizers.algorithms.random_search]|❌|✅| +| `Complex Random Search`|[️️✖️*][neps.optimizers.algorithms.neps_complex_random_search]|[✔️*][neps.optimizers.algorithms.neps_complex_random_search]|❌|✅| +| [`Bayesian Optimization`](../search_algorithms/bayesian_optimization.md)|[️️✖️*][neps.optimizers.algorithms.bayesian_optimization]|❌|✅|❌| +| [`Successive Halving`](../search_algorithms/multifidelity.md#1-successive-halfing)|✅|[✔️*][neps.optimizers.algorithms.successive_halving]|❌|❌| +| [`ASHA`](../search_algorithms/multifidelity.md#asynchronous-successive-halving)|✅|[✔️*][neps.optimizers.algorithms.asha]|❌|❌| +| [`Hyperband`](../search_algorithms/multifidelity.md#2-hyperband)|✅|[✔️*][neps.optimizers.algorithms.hyperband]|❌|❌| +| [`Asynch HB`](../search_algorithms/multifidelity.md)|✅|[✔️*][neps.optimizers.algorithms.async_hb]|❌|❌| +| [`IfBO`](../search_algorithms/multifidelity.md#3-in-context-freeze-thaw-bayesian-optimization)|✅|[✔️*][neps.optimizers.algorithms.ifbo]|✅|❌| +| [`PiBO`](../search_algorithms/prior.md#1-pibo)|[️️✖️*][neps.optimizers.algorithms.pibo]|✅|✅|❌| +| [`PriorBand`](../search_algorithms/multifidelity_prior.md#1-priorband)|✅|✅|✅|✅| ## What is Multi-Fidelity Optimization? 
diff --git a/neps_examples/__init__.py b/neps_examples/__init__.py index f1c8f4631..bd41652af 100644 --- a/neps_examples/__init__.py +++ b/neps_examples/__init__.py @@ -25,6 +25,7 @@ core_examples = [ # Run locally and on github actions "basic_usage/hyperparameters", # NOTE: This needs to be first for some tests to work "basic_usage/analyse", + "basic_usage/pytorch_nn_example", "experimental/expert_priors_for_architecture_and_hyperparameters", "efficiency/multi_fidelity", ] diff --git a/neps_examples/basic_usage/hyperparameters.py b/neps_examples/basic_usage/hyperparameters.py index 88ff20918..348ff6362 100644 --- a/neps_examples/basic_usage/hyperparameters.py +++ b/neps_examples/basic_usage/hyperparameters.py @@ -2,12 +2,6 @@ import numpy as np import neps -# This example demonstrates how to use NePS to optimize hyperparameters -# of a pipeline. The pipeline is a simple function that takes in -# five hyperparameters and returns their sum. -# Neps uses the default optimizer to minimize this objective function. - - def evaluate_pipeline(float1, float2, categorical, integer1, integer2): objective_to_minimize = -float( np.sum([float1, float2, int(categorical), integer1, integer2]) diff --git a/neps_examples/basic_usage/pytorch_nn_example.py b/neps_examples/basic_usage/pytorch_nn_example.py new file mode 100644 index 000000000..80baebbdb --- /dev/null +++ b/neps_examples/basic_usage/pytorch_nn_example.py @@ -0,0 +1,127 @@ +""" +This example demonstrates the full capabilities of NePS Spaces +by defining a neural network architecture using PyTorch modules. +It showcases how to interact with the NePS Spaces API to create, +sample and evaluate a neural network pipeline. +It also demonstrates how to convert the pipeline to a callable +and how to run NePS with the defined pipeline and space. 
+""" + +import numpy as np +import torch +import torch.nn as nn +import neps +from neps.space.neps_spaces.parameters import Pipeline, Operation, Categorical, Resampled +from neps.space.neps_spaces import neps_space + +# Define the neural network architecture using PyTorch as usual +class ReLUConvBN(nn.Module): + def __init__(self, out_channels, kernel_size, stride, padding): + super().__init__() + + self.kernel_size = kernel_size + self.op = nn.Sequential( + nn.ReLU(inplace=False), + nn.LazyConv2d( + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=2, + bias=False, + ), + nn.LazyBatchNorm2d(affine=True, track_running_stats=True), + ) + + def forward(self, x): + return self.op(x) + + +class Identity(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return x + + +# Define the NEPS space for the neural network architecture +class NN_Space(Pipeline): + _id = Operation(operator=Identity) + _three = Operation(operator=nn.Conv2d,kwargs={"in_channels":3, "out_channels":3, "kernel_size":3, "stride":1, "padding":1}) + _one = Operation(operator=nn.Conv2d,kwargs={"in_channels":3, "out_channels":3, "kernel_size":1, "stride":1, "padding":0}) + _reluconvbn = Operation(operator=ReLUConvBN, kwargs={"out_channels":3, "kernel_size":3, "stride":1, "padding":1}) + + _O = Categorical(choices=(_three, _one, _id)) + + _C_ARGS = Categorical( + choices=( + (Resampled(_O),), + (Resampled(_O), Resampled("model"), _reluconvbn), + (Resampled(_O), Resampled("model")), + (Resampled("model"),), + ), + ) + _C = Operation( + operator=nn.Sequential, + args=Resampled(_C_ARGS), + ) + + _model_ARGS = Categorical( + choices=( + (Resampled(_C),), + (_reluconvbn,), + (Resampled("model"),), + (Resampled("model"), Resampled(_C)), + (Resampled(_O), Resampled(_O), Resampled(_O)), + ( + Resampled("model"), + Resampled("model"), + Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), + Resampled(_O), 
+ ), + ), + ) + model = Operation( + operator=nn.Sequential, + args=Resampled(_model_ARGS), + ) + +# Sampling and printing one random configuration of the pipeline +pipeline = NN_Space() +resolved_pipeline, resolution_context = neps_space.resolve(pipeline) + +s = resolved_pipeline.model +s_config_string = neps_space.convert_operation_to_string(s) +pretty_config = neps_space.config_string.ConfigString(s_config_string).pretty_format() +s_callable = neps_space.convert_operation_to_callable(s) + +print("Callable:\n") +print(s_callable) + +print("\n\nConfig string:\n") +print(pretty_config) + +# Defining the pipeline, using the model from the NN_space space as callable +def evaluate_pipeline(model: nn.Sequential): + x = torch.ones(size=[1, 3, 220, 220]) + result = np.sum(model(x).detach().numpy().flatten()) + return result + + +# Run NePS with the defined pipeline and space and show the best configuration +pipeline_space = NN_Space() +neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=pipeline_space, + optimizer=neps.algorithms.neps_random_search, + root_directory="results/neps_spaces_nn_example", + post_run_summary=True, + max_evaluations_total=5, + overwrite_working_directory=True, +) +neps.status("results/neps_spaces_nn_example", print_summary=True, pipeline_space_variables=(pipeline_space, ["model"])) diff --git a/neps_examples/neps_spaces/pytorch_nn_example.ipynb b/neps_examples/neps_spaces/pytorch_nn_example.ipynb deleted file mode 100644 index 5fc9a6352..000000000 --- a/neps_examples/neps_spaces/pytorch_nn_example.ipynb +++ /dev/null @@ -1,282 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "f3ca063f", - "metadata": {}, - "outputs": [], - "source": [ - "# Define the neural network architecture using PyTorch as usual\n", - "\n", - "import torch\n", - "import torch.nn as nn\n", - "\n", - "class ReLUConvBN(nn.Module):\n", - " def __init__(self, out_channels, kernel_size, stride, padding):\n", - " 
super().__init__()\n", - "\n", - " self.kernel_size = kernel_size\n", - " self.op = nn.Sequential(\n", - " nn.ReLU(inplace=False),\n", - " nn.LazyConv2d(\n", - " out_channels=out_channels,\n", - " kernel_size=kernel_size,\n", - " stride=stride,\n", - " padding=padding,\n", - " dilation=2,\n", - " bias=False,\n", - " ),\n", - " nn.LazyBatchNorm2d(affine=True, track_running_stats=True),\n", - " )\n", - "\n", - " def forward(self, x):\n", - " return self.op(x)\n", - "\n", - "\n", - "class Identity(nn.Module):\n", - " def __init__(self):\n", - " super().__init__()\n", - "\n", - " def forward(self, x):\n", - " return x\n", - " \n" - ] - }, - { - "cell_type": "code", - "execution_count": 54, - "id": "4bda71ce", - "metadata": {}, - "outputs": [], - "source": [ - "# Define the NEPS space for the neural network architecture\n", - "\n", - "from neps.space.neps_spaces.parameters import Pipeline, Operation, Categorical, Resampled\n", - "\n", - "class NN_Space(Pipeline):\n", - " _id = Operation(operator=Identity)\n", - " _three = Operation(operator=nn.Conv2d,kwargs={\"in_channels\":3, \"out_channels\":3, \"kernel_size\":3, \"stride\":1, \"padding\":1})\n", - " _one = Operation(operator=nn.Conv2d,kwargs={\"in_channels\":3, \"out_channels\":3, \"kernel_size\":1, \"stride\":1, \"padding\":0})\n", - " _reluconvbn = Operation(operator=ReLUConvBN, kwargs={\"out_channels\":3, \"kernel_size\":3, \"stride\":1, \"padding\":1})\n", - "\n", - " _O = Categorical(choices=(_three, _one, _id))\n", - "\n", - " _C_ARGS = Categorical(\n", - " choices=(\n", - " (Resampled(_O),),\n", - " (Resampled(_O), Resampled(\"model\"), _reluconvbn),\n", - " (Resampled(_O), Resampled(\"model\")),\n", - " (Resampled(\"model\"),),\n", - " ),\n", - " )\n", - " _C = Operation(\n", - " operator=nn.Sequential,\n", - " args=Resampled(_C_ARGS),\n", - " )\n", - "\n", - " _model_ARGS = Categorical(\n", - " choices=(\n", - " (Resampled(_C),),\n", - " (_reluconvbn,),\n", - " (Resampled(\"model\"),),\n", - " 
(Resampled(\"model\"), Resampled(_C)),\n", - " (Resampled(_O), Resampled(_O), Resampled(_O)),\n", - " (\n", - " Resampled(\"model\"),\n", - " Resampled(\"model\"),\n", - " Resampled(_O),\n", - " Resampled(_O),\n", - " Resampled(_O),\n", - " Resampled(_O),\n", - " Resampled(_O),\n", - " Resampled(_O),\n", - " ),\n", - " ),\n", - " )\n", - " model = Operation(\n", - " operator=nn.Sequential,\n", - " args=Resampled(_model_ARGS),\n", - " )\n", - " a = 5" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "17005669", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Callable:\n", - "\n", - "Sequential(\n", - " (0): Conv2d(3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", - " (1): Conv2d(3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", - " (2): Identity()\n", - ")\n", - "\n", - "\n", - "Config string:\n", - "\n", - "( ( {'in_channels': 3, 'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}) () ())\n", - "\t01 :: \n", - "\t\t02 :: {'in_channels': 3, 'out_channels': 3, 'kernel_size': 3, 'stride': 1, 'padding': 1}\n", - "\t\t02 :: \n", - "\t\t02 :: \n" - ] - } - ], - "source": [ - "# Sampling and printing one random configuration of the pipeline\n", - "\n", - "from neps.space.neps_spaces import neps_space\n", - "\n", - "pipeline = NN_Space()\n", - "resolved_pipeline, resolution_context = neps_space.resolve(pipeline)\n", - "\n", - "s = resolved_pipeline.model\n", - "s_config_string = neps_space.convert_operation_to_string(s)\n", - "pretty_config = neps_space.config_string.ConfigString(s_config_string).pretty_format()\n", - "s_callable = neps_space.convert_operation_to_callable(s)\n", - "\n", - "print(\"Callable:\\n\")\n", - "print(s_callable)\n", - "\n", - "print(\"\\n\\nConfig string:\\n\")\n", - "print(pretty_config)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "9efeb556", - "metadata": {}, - "outputs": [], - "source": [ - "# Defining the pipeline, 
using the model from the NN_space space as callable\n", - "\n", - "import numpy as np\n", - "\n", - "def evaluate_pipeline(model: nn.Sequential):\n", - " x = torch.ones(size=[1, 3, 220, 220])\n", - " result = np.sum(model(x).detach().numpy().flatten())\n", - " return result" - ] - }, - { - "cell_type": "code", - "execution_count": 73, - "id": "8a18e349", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "NePSRandomSearch()\n", - "dict_keys(['model', 'a'])\n", - "\n" - ] - } - ], - "source": [ - "import yaml\n", - "import neps\n", - "from neps.optimizers import load_optimizer\n", - "from pathlib import Path\n", - "\n", - "import neps.space\n", - "import neps.space.neps_spaces\n", - "import neps.space.neps_spaces.neps_space\n", - "nn_space = NN_Space()\n", - "neps_state = neps.state.NePSState.create_or_load(path=Path(\"./results/compat/\"),load_only=True)\n", - "\n", - "print(load_optimizer(neps.algorithms.neps_random_search,nn_space)[0])\n", - "\n", - "\n", - "trial1 = neps_state.lock_and_sample_trial(optimizer=load_optimizer(neps.algorithms.neps_random_search,nn_space)[0], worker_id=\"1\")\n", - "import pprint\n", - "config = neps.space.neps_spaces.neps_space.NepsCompatConverter().from_neps_config(trial1.config)\n", - "resolved_pipeline, resolution_context = neps_space.resolve(pipeline=NN_Space(),\n", - " # Predefined samplings are the decisions made at each sampling step\n", - " domain_sampler=neps_space.OnlyPredefinedValuesSampler(predefined_samplings=config.predefined_samplings),\n", - " # Environment values are the fidelities and any arguments of the evaluation function not part of the search space\n", - " environment_values=config.environment_values)\n", - "print(resolved_pipeline.get_attrs().keys())\n", - "print(resolution_context)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "fa9cabbf", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "# 
Configs: 5\n", - "\n", - " success: 5\n", - "\n", - "# Best Found (config 3):\n", - "\n", - " objective_to_minimize: -12986.689453125\n", - " config:\n", - " ( () () ())\n", - " \t01 :: \n", - " \t\t02 :: \n", - " \t\t02 :: \n", - " \t\t02 :: \n", - " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\neps_spaces\\results\\neps_spaces_nn_example\\configs\\config_3\n", - "Done.\n" - ] - } - ], - "source": [ - "import neps\n", - "\n", - "pipeline_space = NN_Space()\n", - "\n", - "neps.run(\n", - " evaluate_pipeline=evaluate_pipeline,\n", - " pipeline_space=pipeline_space,\n", - " optimizer=neps.algorithms.neps_random_search,\n", - " root_directory=\"results/neps_spaces_nn_example\",\n", - " post_run_summary=True,\n", - " max_evaluations_total=5,\n", - " overwrite_working_directory=True,\n", - ")\n", - "neps.status(\"results/neps_spaces_nn_example\", print_summary=True, pipeline_space_variables=(pipeline_space, [\"model\"]))\n", - "\n", - "print(\"Done.\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "neural-pipeline-search", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.1" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From 19dbe2cc15b12eabea6230793aa68e4030e74483 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 10 Jul 2025 01:07:54 +0200 Subject: [PATCH 029/156] Add optimizer compatibility check for Pipeline in run function --- neps/api.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/neps/api.py b/neps/api.py index ee56aca03..a686a4fde 100644 --- a/neps/api.py +++ b/neps/api.py @@ -32,7 +32,7 @@ logger = logging.getLogger(__name__) -def run( # noqa: PLR0913, C901 +def run( # noqa: PLR0913, C901, PLR0912 evaluate_pipeline: Callable[..., 
EvaluatePipelineReturn] | str, pipeline_space: ConfigurationSpace | Pipeline, *, @@ -354,6 +354,19 @@ def __call__( space = convert_to_space(pipeline_space) _optimizer_ask, _optimizer_info = load_optimizer(optimizer=optimizer, space=space) + # Optimizer compatibility check: If the space is a Pipeline and the optimizer is not + # one of the NEPS optimizers, we raise an error. + if isinstance(space, Pipeline) and _optimizer_ask not in ( + neps.optimizers.algorithms.neps_random_search, + neps.optimizers.algorithms.neps_priorband, + neps.optimizers.algorithms.neps_complex_random_search, + ): + raise ValueError( + "The provided optimizer is not compatible with this complex search space. " + "Please use one of the NEPS optimizers, such as 'neps_random_search', " + "'neps_priorband', or 'neps_complex_random_search'." + ) + _eval: Callable if isinstance(evaluate_pipeline, str): module, funcname = evaluate_pipeline.rsplit(":", 1) From 353280517346fd646be2993d0ce323f8c279b8e9 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 10 Jul 2025 01:29:38 +0200 Subject: [PATCH 030/156] Add warmstarting functionality to the run method and create example script - Introduced warmstart_configs parameter in the run function to allow warmstarting with predefined configurations. - Implemented warmstart_neps function to handle the warmstarting process. - Added a new example script demonstrating the warmstarting feature using a simple pipeline. 
--- neps/api.py | 170 +++++++++++++++++++++-- neps_examples/efficiency/warmstarting.py | 39 ++++++ 2 files changed, 195 insertions(+), 14 deletions(-) create mode 100644 neps_examples/efficiency/warmstarting.py diff --git a/neps/api.py b/neps/api.py index a686a4fde..59e078796 100644 --- a/neps/api.py +++ b/neps/api.py @@ -3,6 +3,7 @@ from __future__ import annotations import logging +import shutil import warnings from collections.abc import Callable, Mapping from functools import partial @@ -12,13 +13,19 @@ import neps import neps.optimizers.algorithms from neps.optimizers import AskFunction, OptimizerChoice, load_optimizer +from neps.optimizers.ask_and_tell import AskAndTell from neps.runtime import _launch_runtime +from neps.space.neps_spaces import neps_space from neps.space.neps_spaces.neps_space import ( + NepsCompatConverter, adjust_evaluation_pipeline_for_neps_space, convert_neps_to_classic_search_space, ) from neps.space.neps_spaces.parameters import Pipeline from neps.space.parsing import convert_to_space +from neps.state import NePSState, OptimizationState, SeedSnapshot +from neps.state.neps_state import TrialRepo +from neps.state.pipeline_eval import EvaluatePipelineReturn from neps.status.status import post_run_csv from neps.utils.common import dynamic_load_object @@ -57,6 +64,16 @@ def run( # noqa: PLR0913, C901, PLR0912 | CustomOptimizer | Literal["auto"] ) = "auto", + warmstart_configs: ( + list[ + tuple[ + dict[str, Any] | Mapping[str, Any], + dict[str, Any] | Mapping[str, Any], + Any, + ] + ] + | None + ) = None, ) -> None: """Run the optimization. @@ -290,6 +307,16 @@ def __call__( This is mainly meant for internal development but allows you to use the NePS runtime to run your optimizer. + warmstart_configs: A list of configurations to warmstart the NePS state with. + This is useful for testing and debugging purposes, where you want to + start with a set of predefined configurations and their results. 
+ Each configuration is a tuple of three elements: + 1. A dictionary of the samplings to make, i.e. resolution_context.samplings_made + 2. A dictionary of the environment values, i.e. resolution_context.environment_values + 3. The result of the evaluation, which is the return value of the `evaluate_pipeline` + function, i.e. the objective value to minimize or a dictionary with + `"objective_to_minimize"` and `"cost"` keys. + """ # noqa: E501 if ( max_evaluations_total is None @@ -306,6 +333,21 @@ def __call__( stacklevel=2, ) + if warmstart_configs: + logger.info( + "Warmstarting neps.run with the provided" + f" {len(warmstart_configs)} configurations using root directory" + f" {root_directory}." + ) + warmstart_neps( + path=Path(root_directory), + pipeline_space=pipeline_space, + warmstart_configs=warmstart_configs, + optimizer=optimizer, + overwrite_working_directory=overwrite_working_directory, + ) + overwrite_working_directory = False + logger.info(f"Starting neps.run using root directory {root_directory}") # Check if the pipeline_space only contains basic HPO parameters. @@ -336,7 +378,7 @@ def __call__( ) ) and optimizer != "auto" - ): + ) and not warmstart_configs: converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space: logger.info( @@ -345,6 +387,33 @@ def __call__( ) pipeline_space = converted_space + # Optimizer check, if the search space is a Pipeline and the optimizer is not a NEPS + # algorithm, we raise an error, as the optimizer is not compatible. 
+ if ( + isinstance(pipeline_space, Pipeline) + and optimizer + not in ( + neps.optimizers.algorithms.neps_random_search, + neps.optimizers.algorithms.neps_priorband, + neps.optimizers.algorithms.neps_complex_random_search, + ) + and ( + not inner_optimizer + or inner_optimizer + not in ( + neps.optimizers.algorithms.neps_random_search, + neps.optimizers.algorithms.neps_priorband, + neps.optimizers.algorithms.neps_complex_random_search, + ) + ) + and optimizer != "auto" + ): + raise ValueError( + "The provided optimizer is not compatible with this complex search space. " + "Please use one of the NEPS optimizers, such as 'neps_random_search', " + "'neps_priorband', or 'neps_complex_random_search'." + ) + if isinstance(pipeline_space, Pipeline): assert not isinstance(evaluate_pipeline, str) evaluate_pipeline = adjust_evaluation_pipeline_for_neps_space( @@ -354,19 +423,6 @@ def __call__( space = convert_to_space(pipeline_space) _optimizer_ask, _optimizer_info = load_optimizer(optimizer=optimizer, space=space) - # Optimizer compatibility check: If the space is a Pipeline and the optimizer is not - # one of the NEPS optimizers, we raise an error. - if isinstance(space, Pipeline) and _optimizer_ask not in ( - neps.optimizers.algorithms.neps_random_search, - neps.optimizers.algorithms.neps_priorband, - neps.optimizers.algorithms.neps_complex_random_search, - ): - raise ValueError( - "The provided optimizer is not compatible with this complex search space. " - "Please use one of the NEPS optimizers, such as 'neps_random_search', " - "'neps_priorband', or 'neps_complex_random_search'." 
- ) - _eval: Callable if isinstance(evaluate_pipeline, str): module, funcname = evaluate_pipeline.rsplit(":", 1) @@ -416,4 +472,90 @@ def __call__( ) +def warmstart_neps( + path: Path, + pipeline_space: Pipeline, + warmstart_configs: list[ + tuple[ + dict[str, Any] | Mapping[str, Any], + dict[str, Any] | Mapping[str, Any], + EvaluatePipelineReturn, + ] + ], + optimizer: ( + OptimizerChoice + | Mapping[str, Any] + | tuple[OptimizerChoice, Mapping[str, Any]] + | Callable[Concatenate[SearchSpace, ...], AskFunction] # Hack, while we transit + | Callable[Concatenate[Pipeline, ...], AskFunction] # from SearchSpace to + | Callable[Concatenate[SearchSpace | Pipeline, ...], AskFunction] # Pipeline + | CustomOptimizer + | Literal["auto"] + ) = "auto", + overwrite_working_directory: bool = False, # noqa: FBT001, FBT002 +) -> None: + """Warmstart the NePS state with given configurations. + This is useful for testing and debugging purposes, where you want to + start with a set of predefined configurations and their results. + + Args: + path: The path to the NePS state directory. + pipeline_space: The pipeline space to use for the warmstart. + warmstart_configs: A list of tuples, where each tuple contains a configuration, + environment values, and the result of the evaluation. + The configuration is a dictionary of parameter values, the environment values + are also a dictionary, and the result is the evaluation result. + optimizer: The optimizer to use for the warmstart. This can be a string, a + callable, or a tuple of a callable and a dictionary of parameters. + If "auto", the optimizer will be chosen based on the pipeline space. + overwrite_working_directory: If True, the working directory will be deleted before + starting the warmstart. This is useful for testing and debugging purposes, + where you want to start with a clean state. 
+ """ + if overwrite_working_directory and path.is_dir(): + shutil.rmtree(path) + optimizer_ask, optimizer_info = neps.optimizers.load_optimizer( + optimizer, pipeline_space + ) + state = NePSState.create_or_load( + path, + optimizer_info=optimizer_info, + optimizer_state=OptimizationState( + budget=None, seed_snapshot=SeedSnapshot.new_capture(), shared_state={} + ), + ) + for n_config, (config, env, result) in enumerate(warmstart_configs): + _, resolution_context = neps_space.resolve( + pipeline=pipeline_space, + domain_sampler=neps_space.OnlyPredefinedValuesSampler( + predefined_samplings=config + ), + environment_values=env, + ) + + ask_tell = AskAndTell(optimizer=optimizer_ask, worker_id="warmstart_worker") + trial = ask_tell.tell_custom( + config_id=f"{n_config}{'_0' if pipeline_space.fidelity_attrs else ''}", + config=config, + result=result, + ) + trial.config = NepsCompatConverter.to_neps_config(resolution_context) + if ( + path + / f"configs/config_{n_config}{'_0' if pipeline_space.fidelity_attrs else ''}" + ).is_dir(): + raise ValueError( + f"Warmstart config {n_config} already exists in {path}. Please remove it" + " before running the script again." 
+ ) + TrialRepo(path / "configs").store_new_trial(trial) + assert trial.report + assert trial.metadata.evaluating_worker_id + state.lock_and_report_trial_evaluation( + trial=trial, + report=trial.report, + worker_id=trial.metadata.evaluating_worker_id, + ) + + __all__ = ["run"] diff --git a/neps_examples/efficiency/warmstarting.py b/neps_examples/efficiency/warmstarting.py new file mode 100644 index 000000000..548a93212 --- /dev/null +++ b/neps_examples/efficiency/warmstarting.py @@ -0,0 +1,39 @@ +import neps +import logging +from neps import Pipeline, Integer, Float, Fidelity +from neps.space.neps_spaces import neps_space + + +class SimpleSpace(Pipeline): + int_param = Integer(0, 10) + float_param = Float(0.0, 1.0) + epochs = Fidelity(Integer(1, 5)) + + +# Sampling a random configuration of the pipeline, which will be used for warmstarting +pipeline = SimpleSpace() +resolved_pipeline, resolution_context = neps_space.resolve( + pipeline, environment_values={"epochs": 5} +) + + +def evaluate_pipeline(int_param, float_param, epochs=5): + # This is a dummy evaluation function that just returns the weighted sum + return {"objective_to_minimize": (int_param + float_param)*epochs, "cost": epochs} + + +wanted_config = resolution_context.samplings_made +wanted_env = resolution_context.environment_values +wanted_result = evaluate_pipeline(**resolved_pipeline.get_attrs()) +warmstarting_configs = [(wanted_config, wanted_env, wanted_result)] + + +logging.basicConfig(level=logging.INFO) +neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=SimpleSpace(), + root_directory="results/warmstart_example/", + max_evaluations_total=15, + optimizer=neps.algorithms.neps_priorband, + warmstart_configs=warmstarting_configs +) From b127740df97d6e06266a22519acf9809886d89d8 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 10 Jul 2025 01:39:30 +0200 Subject: [PATCH 031/156] - Import warmstart_neps in the API module. 
- Update the warmstart_neps function to accept working_directory instead of path. - Refactor warmstarting example. --- neps/__init__.py | 3 +- neps/api.py | 35 ++++++++++++------------ neps_examples/efficiency/warmstarting.py | 9 +++--- 3 files changed, 24 insertions(+), 23 deletions(-) diff --git a/neps/__init__.py b/neps/__init__.py index aa1883d36..a506703bc 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -5,7 +5,7 @@ and algorithms. """ -from neps.api import run +from neps.api import run, warmstart_neps from neps.optimizers import algorithms from neps.optimizers.ask_and_tell import AskAndTell from neps.optimizers.optimizer import SampledConfig @@ -50,4 +50,5 @@ "run", "status", "tblogger", + "warmstart_neps", ] diff --git a/neps/api.py b/neps/api.py index 59e078796..a4c2b22f4 100644 --- a/neps/api.py +++ b/neps/api.py @@ -5,7 +5,7 @@ import logging import shutil import warnings -from collections.abc import Callable, Mapping +from collections.abc import Callable, Mapping, Sequence from functools import partial from pathlib import Path from typing import TYPE_CHECKING, Any, Concatenate, Literal @@ -340,7 +340,7 @@ def __call__( f" {root_directory}." ) warmstart_neps( - path=Path(root_directory), + working_directory=Path(root_directory), pipeline_space=pipeline_space, warmstart_configs=warmstart_configs, optimizer=optimizer, @@ -473,15 +473,16 @@ def __call__( def warmstart_neps( - path: Path, pipeline_space: Pipeline, - warmstart_configs: list[ + working_directory: Path | str, + warmstart_configs: Sequence[ tuple[ dict[str, Any] | Mapping[str, Any], dict[str, Any] | Mapping[str, Any], EvaluatePipelineReturn, ] ], + overwrite_working_directory: bool = False, # noqa: FBT001, FBT002 optimizer: ( OptimizerChoice | Mapping[str, Any] @@ -492,33 +493,33 @@ def warmstart_neps( | CustomOptimizer | Literal["auto"] ) = "auto", - overwrite_working_directory: bool = False, # noqa: FBT001, FBT002 ) -> None: """Warmstart the NePS state with given configurations. 
This is useful for testing and debugging purposes, where you want to start with a set of predefined configurations and their results. Args: - path: The path to the NePS state directory. pipeline_space: The pipeline space to use for the warmstart. + working_directory: The path to the NePS state directory. warmstart_configs: A list of tuples, where each tuple contains a configuration, environment values, and the result of the evaluation. The configuration is a dictionary of parameter values, the environment values are also a dictionary, and the result is the evaluation result. - optimizer: The optimizer to use for the warmstart. This can be a string, a - callable, or a tuple of a callable and a dictionary of parameters. - If "auto", the optimizer will be chosen based on the pipeline space. overwrite_working_directory: If True, the working directory will be deleted before starting the warmstart. This is useful for testing and debugging purposes, where you want to start with a clean state. + optimizer: The optimizer to use for the warmstart. This can be a string, a + callable, or a tuple of a callable and a dictionary of parameters. + If "auto", the optimizer will be chosen based on the pipeline space. 
""" - if overwrite_working_directory and path.is_dir(): - shutil.rmtree(path) + working_directory = Path(working_directory) + if overwrite_working_directory and working_directory.is_dir(): + shutil.rmtree(working_directory) optimizer_ask, optimizer_info = neps.optimizers.load_optimizer( optimizer, pipeline_space ) state = NePSState.create_or_load( - path, + working_directory, optimizer_info=optimizer_info, optimizer_state=OptimizationState( budget=None, seed_snapshot=SeedSnapshot.new_capture(), shared_state={} @@ -541,14 +542,14 @@ def warmstart_neps( ) trial.config = NepsCompatConverter.to_neps_config(resolution_context) if ( - path + working_directory / f"configs/config_{n_config}{'_0' if pipeline_space.fidelity_attrs else ''}" ).is_dir(): raise ValueError( - f"Warmstart config {n_config} already exists in {path}. Please remove it" - " before running the script again." + f"Warmstart config {n_config} already exists in {working_directory}." + " Please remove it before running the script again." 
) - TrialRepo(path / "configs").store_new_trial(trial) + TrialRepo(working_directory / "configs").store_new_trial(trial) assert trial.report assert trial.metadata.evaluating_worker_id state.lock_and_report_trial_evaluation( @@ -558,4 +559,4 @@ def warmstart_neps( ) -__all__ = ["run"] +__all__ = ["run", "warmstart_neps"] diff --git a/neps_examples/efficiency/warmstarting.py b/neps_examples/efficiency/warmstarting.py index 548a93212..1f3b21ef5 100644 --- a/neps_examples/efficiency/warmstarting.py +++ b/neps_examples/efficiency/warmstarting.py @@ -3,7 +3,6 @@ from neps import Pipeline, Integer, Float, Fidelity from neps.space.neps_spaces import neps_space - class SimpleSpace(Pipeline): int_param = Integer(0, 10) float_param = Float(0.0, 1.0) @@ -17,9 +16,8 @@ class SimpleSpace(Pipeline): ) -def evaluate_pipeline(int_param, float_param, epochs=5): - # This is a dummy evaluation function that just returns the weighted sum - return {"objective_to_minimize": (int_param + float_param)*epochs, "cost": epochs} +def evaluate_pipeline(int_param, float_param, epochs=5) -> dict[str, float]: + return {"objective_to_minimize": (int_param + float_param) * epochs, "cost": epochs} wanted_config = resolution_context.samplings_made @@ -28,6 +26,7 @@ def evaluate_pipeline(int_param, float_param, epochs=5): warmstarting_configs = [(wanted_config, wanted_env, wanted_result)] +# Running the NEPS pipeline with warmstarting logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, @@ -35,5 +34,5 @@ def evaluate_pipeline(int_param, float_param, epochs=5): root_directory="results/warmstart_example/", max_evaluations_total=15, optimizer=neps.algorithms.neps_priorband, - warmstart_configs=warmstarting_configs + warmstart_configs=warmstarting_configs, ) From 5a5e0c4a781d4ec7d4a4d7af74716fc9bff798f0 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 10 Jul 2025 03:12:10 +0200 Subject: [PATCH 032/156] Refactor warmstart_neps function to improve parameter naming and 
logging; enhance fidelity handling in trial evaluation. --- neps/api.py | 120 +++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 87 insertions(+), 33 deletions(-) diff --git a/neps/api.py b/neps/api.py index a4c2b22f4..6ee492e0a 100644 --- a/neps/api.py +++ b/neps/api.py @@ -12,6 +12,7 @@ import neps import neps.optimizers.algorithms +import neps.optimizers.neps_bracket_optimizer from neps.optimizers import AskFunction, OptimizerChoice, load_optimizer from neps.optimizers.ask_and_tell import AskAndTell from neps.runtime import _launch_runtime @@ -334,13 +335,8 @@ def __call__( ) if warmstart_configs: - logger.info( - "Warmstarting neps.run with the provided" - f" {len(warmstart_configs)} configurations using root directory" - f" {root_directory}." - ) warmstart_neps( - working_directory=Path(root_directory), + root_directory=Path(root_directory), pipeline_space=pipeline_space, warmstart_configs=warmstart_configs, optimizer=optimizer, @@ -474,7 +470,7 @@ def __call__( def warmstart_neps( pipeline_space: Pipeline, - working_directory: Path | str, + root_directory: Path | str, warmstart_configs: Sequence[ tuple[ dict[str, Any] | Mapping[str, Any], @@ -500,7 +496,7 @@ def warmstart_neps( Args: pipeline_space: The pipeline space to use for the warmstart. - working_directory: The path to the NePS state directory. + root_directory: The path to the NePS state directory. warmstart_configs: A list of tuples, where each tuple contains a configuration, environment values, and the result of the evaluation. The configuration is a dictionary of parameter values, the environment values @@ -512,14 +508,19 @@ def warmstart_neps( callable, or a tuple of a callable and a dictionary of parameters. If "auto", the optimizer will be chosen based on the pipeline space. 
""" - working_directory = Path(working_directory) - if overwrite_working_directory and working_directory.is_dir(): - shutil.rmtree(working_directory) + logger.info( + "Warmstarting neps.run with the provided" + f" {len(warmstart_configs)} configurations using root directory" + f" {root_directory}." + ) + root_directory = Path(root_directory) + if overwrite_working_directory and root_directory.is_dir(): + shutil.rmtree(root_directory) optimizer_ask, optimizer_info = neps.optimizers.load_optimizer( optimizer, pipeline_space ) state = NePSState.create_or_load( - working_directory, + root_directory, optimizer_info=optimizer_info, optimizer_state=OptimizationState( budget=None, seed_snapshot=SeedSnapshot.new_capture(), shared_state={} @@ -535,28 +536,81 @@ def warmstart_neps( ) ask_tell = AskAndTell(optimizer=optimizer_ask, worker_id="warmstart_worker") - trial = ask_tell.tell_custom( - config_id=f"{n_config}{'_0' if pipeline_space.fidelity_attrs else ''}", - config=config, - result=result, - ) - trial.config = NepsCompatConverter.to_neps_config(resolution_context) - if ( - working_directory - / f"configs/config_{n_config}{'_0' if pipeline_space.fidelity_attrs else ''}" - ).is_dir(): - raise ValueError( - f"Warmstart config {n_config} already exists in {working_directory}." - " Please remove it before running the script again." + if pipeline_space.fidelity_attrs: + assert isinstance( + optimizer_ask, + neps.optimizers.neps_bracket_optimizer._NePSBracketOptimizer, + ), ( + "The optimizer must be a NePSBracketOptimizer when using fidelity" + " attributes." 
+ ) + rung_to_fid = optimizer_ask.rung_to_fid + fid_to_rung = { + v: max(k for k, val in rung_to_fid.items() if val == v) + for v in rung_to_fid.values() + } + fidelity_value = env[next(iter(pipeline_space.fidelity_attrs.keys()))] + highest_rung = max( + [ + fid_to_rung[small_key] + for small_key in [key for key in fid_to_rung if key <= fidelity_value] + ] + ) + for rung in range(highest_rung + 1): + # Store the config for each rung + config_path = f"{n_config}_{rung}" + + # Check if result is a UserResultDict by checking its structure + if isinstance(result, dict) and "cost" in result: + # This is a UserResultDict-like dictionary + rung_result = result.copy() + rung_result["cost"] = rung_result.get("cost", 0) / (highest_rung + 1) # type: ignore + else: + # This is a simple numeric result + rung_result = result # type: ignore + trial = ask_tell.tell_custom( + config_id=config_path, + config=config, + result=rung_result, + previous_trial_id=f"{n_config}_{rung - 1}" if rung > 0 else None, + ) + trial.config = NepsCompatConverter.to_neps_config(resolution_context) + if (root_directory / config_path).is_dir(): + raise ValueError( + f"Warmstart config {n_config} already exists in" + f" {root_directory}. Please remove it before running the" + " script again." + ) + TrialRepo(root_directory / "configs").store_new_trial(trial) + assert trial.report + assert trial.metadata.evaluating_worker_id + state.lock_and_report_trial_evaluation( + trial=trial, + report=trial.report, + worker_id=trial.metadata.evaluating_worker_id, + ) + + else: + config_path = f"{n_config}" + trial = ask_tell.tell_custom( + config_id=config_path, + config=config, + result=result, + ) + trial.config = NepsCompatConverter.to_neps_config(resolution_context) + if (root_directory / config_path).is_dir(): + raise ValueError( + f"Warmstart config {n_config} already exists in {root_directory}." + " Please remove it before running the script again." 
+ ) + TrialRepo(root_directory / "configs").store_new_trial(trial) + assert trial.report + assert trial.metadata.evaluating_worker_id + state.lock_and_report_trial_evaluation( + trial=trial, + report=trial.report, + worker_id=trial.metadata.evaluating_worker_id, ) - TrialRepo(working_directory / "configs").store_new_trial(trial) - assert trial.report - assert trial.metadata.evaluating_worker_id - state.lock_and_report_trial_evaluation( - trial=trial, - report=trial.report, - worker_id=trial.metadata.evaluating_worker_id, - ) __all__ = ["run", "warmstart_neps"] From 79aab6031b5bd9a59d5fab947da6c1e774d670c4 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 10 Jul 2025 21:40:17 +0200 Subject: [PATCH 033/156] Add Gaussian priors to NePS Integer and Floats Refactor NEPS optimizer tests to use partial functions and update prior handling - Updated test cases in `test_neps_integration.py` to use `partial` for NEPS optimizers, allowing for `ignore_fidelity` parameter. - Adjusted root directory naming to reflect the function name of the optimizer. - Modified tests in `test_neps_integration_priorband__max_cost.py` and `test_neps_integration_priorband__max_evals.py` to use partial functions for NEPS optimizers. - Changed error message in `test_search_space__fidelity.py` to specify prior details. - Enhanced `test_neps_state.py` to include `prior_confidence` for Float, Categorical, and Integer parameters. - Updated lists of optimizers in `test_neps_state.py` to reflect changes in optimizer naming conventions. 
--- docs/reference/neps_spaces.md | 6 +- docs/reference/optimizers.md | 2 +- .../search_algorithms/landing_page_algo.md | 2 +- neps/api.py | 64 ++------- neps/optimizers/__init__.py | 16 ++- neps/optimizers/algorithms.py | 132 ++++++++++++++---- neps/optimizers/neps_bracket_optimizer.py | 2 +- neps/optimizers/neps_priorband.py | 2 +- neps/optimizers/neps_random_search.py | 98 ++++++++++--- neps/sampling/priors.py | 6 +- neps/space/neps_spaces/neps_space.py | 128 ++++++++++++++++- neps/space/neps_spaces/parameters.py | 9 +- neps/space/neps_spaces/sampling.py | 88 ++++++++---- neps/state/neps_state.py | 2 +- .../test_neps_space/test_neps_integration.py | 25 ++-- ...st_neps_integration_priorband__max_cost.py | 4 +- ...t_neps_integration_priorband__max_evals.py | 4 +- .../test_search_space__fidelity.py | 2 +- tests/test_state/test_neps_state.py | 18 ++- 19 files changed, 437 insertions(+), 173 deletions(-) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 2a988cebe..41787291d 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -130,9 +130,9 @@ neps.run( Currently, NePS Spaces is compatible with these optimizers, which can be imported from [neps.algorithms][neps.optimizers.algorithms--neps-algorithms]: - - [`Random Search`][neps.optimizers.algorithms.neps_random_search], which can sample the space uniformly at random - - [`Complex Random Search`][neps.optimizers.algorithms.neps_complex_random_search], which can sample the space uniformly at random, using priors and mutating previously sampled configurations - - [`PriorBand`][neps.optimizers.algorithms.neps_priorband], which uses [multi-fidelity](./search_algorithms/multifidelity.md) and the prior knowledge encoded in the NePS space + - [`Random Search`][neps.optimizers.algorithms.random_search], which can sample the space uniformly at random + - [`Complex Random Search`][neps.optimizers.algorithms.complex_random_search], which can sample the space uniformly at 
random, using priors and mutating previously sampled configurations + - [`PriorBand`][neps.optimizers.algorithms.priorband], which uses [multi-fidelity](./search_algorithms/multifidelity.md) and the prior knowledge encoded in the NePS space ## Inspecting Configurations diff --git a/docs/reference/optimizers.md b/docs/reference/optimizers.md index 5aa68d1ce..37667fb44 100644 --- a/docs/reference/optimizers.md +++ b/docs/reference/optimizers.md @@ -46,7 +46,7 @@ NePS provides a multitude of optimizers from the literature, the [algorithms](.. | :- | :------------: | :----: | :---------: | :-----------------: | | `Grid Search`|[️️✖️*][neps.optimizers.algorithms.grid_search]|❌|❌|❌| | `Random Search`|[️️✖️*][neps.optimizers.algorithms.random_search]|[✔️*][neps.optimizers.algorithms.random_search]|❌|✅| -| `Complex Random Search`|[️️✖️*][neps.optimizers.algorithms.neps_complex_random_search]|[✔️*][neps.optimizers.algorithms.neps_complex_random_search]|❌|✅| +| `Complex Random Search`|[️️✖️*][neps.optimizers.algorithms.complex_random_search]|[✔️*][neps.optimizers.algorithms.complex_random_search]|❌|✅| | [`Bayesian Optimization`](../reference/search_algorithms/bayesian_optimization.md)|[️️✖️*][neps.optimizers.algorithms.bayesian_optimization]|❌|✅|❌| | [`Successive Halving`](../reference/search_algorithms/multifidelity.md#1-successive-halfing)|✅|[✔️*][neps.optimizers.algorithms.successive_halving]|❌|❌| | [`ASHA`](../reference/search_algorithms/multifidelity.md#asynchronous-successive-halving)|✅|[✔️*][neps.optimizers.algorithms.asha]|❌|❌| diff --git a/docs/reference/search_algorithms/landing_page_algo.md b/docs/reference/search_algorithms/landing_page_algo.md index 7d80d6090..0a91a2275 100644 --- a/docs/reference/search_algorithms/landing_page_algo.md +++ b/docs/reference/search_algorithms/landing_page_algo.md @@ -10,7 +10,7 @@ We distinguish between algorithms that use different types of information and st | :- | :------------: | :----: | :---------: | :-----------------: | | 
`Grid Search`|[️️✖️*][neps.optimizers.algorithms.grid_search]|❌|❌|❌| | `Random Search`|[️️✖️*][neps.optimizers.algorithms.random_search]|[✔️*][neps.optimizers.algorithms.random_search]|❌|✅| -| `Complex Random Search`|[️️✖️*][neps.optimizers.algorithms.neps_complex_random_search]|[✔️*][neps.optimizers.algorithms.neps_complex_random_search]|❌|✅| +| `Complex Random Search`|[️️✖️*][neps.optimizers.algorithms.complex_random_search]|[✔️*][neps.optimizers.algorithms.complex_random_search]|❌|✅| | [`Bayesian Optimization`](../search_algorithms/bayesian_optimization.md)|[️️✖️*][neps.optimizers.algorithms.bayesian_optimization]|❌|✅|❌| | [`Successive Halving`](../search_algorithms/multifidelity.md#1-successive-halfing)|✅|[✔️*][neps.optimizers.algorithms.successive_halving]|❌|❌| | [`ASHA`](../search_algorithms/multifidelity.md#asynchronous-successive-halving)|✅|[✔️*][neps.optimizers.algorithms.asha]|❌|❌| diff --git a/neps/api.py b/neps/api.py index 6ee492e0a..ee92f846f 100644 --- a/neps/api.py +++ b/neps/api.py @@ -6,7 +6,6 @@ import shutil import warnings from collections.abc import Callable, Mapping, Sequence -from functools import partial from pathlib import Path from typing import TYPE_CHECKING, Any, Concatenate, Literal @@ -20,6 +19,7 @@ from neps.space.neps_spaces.neps_space import ( NepsCompatConverter, adjust_evaluation_pipeline_for_neps_space, + check_neps_space_compatibility, convert_neps_to_classic_search_space, ) from neps.space.neps_spaces.parameters import Pipeline @@ -40,7 +40,7 @@ logger = logging.getLogger(__name__) -def run( # noqa: PLR0913, C901, PLR0912 +def run( # noqa: PLR0913, C901 evaluate_pipeline: Callable[..., EvaluatePipelineReturn] | str, pipeline_space: ConfigurationSpace | Pipeline, *, @@ -265,10 +265,11 @@ def evaluate_pipeline(some_parameter: float) -> float: ```python neps.run( ..., - optimzier={ - "name": "priorband", - "sample_prior_first": True, - } + optimzier=("priorband", + { + "sample_prior_first": True, + } + ) ) ``` @@ -352,62 +353,23 
@@ def __call__( # pipeline_space and only use the new NEPS optimizers. # If the optimizer is not a NEPS algorithm, we try to convert the pipeline_space - inner_optimizer = None - if isinstance(optimizer, partial): - inner_optimizer = optimizer.func - while isinstance(inner_optimizer, partial): - inner_optimizer = inner_optimizer.func - if ( - optimizer - not in ( - neps.optimizers.algorithms.neps_random_search, - neps.optimizers.algorithms.neps_priorband, - neps.optimizers.algorithms.neps_complex_random_search, - ) - and ( - not inner_optimizer - or inner_optimizer - not in ( - neps.optimizers.algorithms.neps_random_search, - neps.optimizers.algorithms.neps_priorband, - neps.optimizers.algorithms.neps_complex_random_search, - ) - ) - and optimizer != "auto" - ) and not warmstart_configs: + + neps_classic_space_compatibility = check_neps_space_compatibility(optimizer) + if neps_classic_space_compatibility in ["both", "classic"] and not warmstart_configs: converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space: - logger.info( - "The provided pipeline_space only contains basic HPO parameters, " - "converting it to a classic SearchSpace." - ) pipeline_space = converted_space # Optimizer check, if the search space is a Pipeline and the optimizer is not a NEPS # algorithm, we raise an error, as the optimizer is not compatible. if ( isinstance(pipeline_space, Pipeline) - and optimizer - not in ( - neps.optimizers.algorithms.neps_random_search, - neps.optimizers.algorithms.neps_priorband, - neps.optimizers.algorithms.neps_complex_random_search, - ) - and ( - not inner_optimizer - or inner_optimizer - not in ( - neps.optimizers.algorithms.neps_random_search, - neps.optimizers.algorithms.neps_priorband, - neps.optimizers.algorithms.neps_complex_random_search, - ) - ) - and optimizer != "auto" + and neps_classic_space_compatibility == "classic" ): raise ValueError( "The provided optimizer is not compatible with this complex search space. 
" - "Please use one of the NEPS optimizers, such as 'neps_random_search', " - "'neps_priorband', or 'neps_complex_random_search'." + "Please use one that is, such as 'random_search', " + "'priorband', or 'complex_random_search'." ) if isinstance(pipeline_space, Pipeline): diff --git a/neps/optimizers/__init__.py b/neps/optimizers/__init__.py index 8cff9e3e8..50afa8e03 100644 --- a/neps/optimizers/__init__.py +++ b/neps/optimizers/__init__.py @@ -1,6 +1,7 @@ from __future__ import annotations from collections.abc import Callable, Mapping +from functools import partial from typing import TYPE_CHECKING, Any, Concatenate, Literal from neps.optimizers.algorithms import ( @@ -71,18 +72,27 @@ def load_optimizer( # Provided optimizer initializer case _ if callable(optimizer): + inner_optimizer = None + if isinstance(optimizer, partial): + inner_optimizer = optimizer.func + while isinstance(inner_optimizer, partial): + inner_optimizer = inner_optimizer.func + else: + inner_optimizer = optimizer keywords = extract_keyword_defaults(optimizer) # Error catch and type ignore needed while we transition from SearchSpace to # Pipeline try: - _optimizer = optimizer(space) # type: ignore + _optimizer = inner_optimizer(space, **keywords) # type: ignore except TypeError as e: raise TypeError( - f"Optimizer {optimizer} does not accept a space of type" + f"Optimizer {inner_optimizer} does not accept a space of type" f" {type(space)}." 
) from e - info = OptimizerInfo(name=optimizer.__name__, info=keywords) + + info = OptimizerInfo(name=inner_optimizer.__name__, info=keywords) + return _optimizer, info # Custom optimizer, we create it diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 5d0195ac6..2ef2d27ae 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -36,14 +36,17 @@ from neps.optimizers.models.ftpfn import FTPFNSurrogate from neps.optimizers.neps_bracket_optimizer import _NePSBracketOptimizer from neps.optimizers.neps_priorband import NePSPriorBandSampler -from neps.optimizers.neps_random_search import NePSComplexRandomSearch, NePSRandomSearch +from neps.optimizers.neps_random_search import ( + NePSComplexRandomSearch, + NePSRandomSearch, +) from neps.optimizers.optimizer import AskFunction # noqa: TC001 from neps.optimizers.priorband import PriorBandSampler from neps.optimizers.random_search import RandomSearch from neps.sampling import Prior, Sampler, Uniform from neps.space.encoding import CategoricalToUnitNorm, ConfigEncoder from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space -from neps.space.neps_spaces.parameters import Pipeline +from neps.space.neps_spaces.parameters import Pipeline, Resolvable if TYPE_CHECKING: import pandas as pd @@ -403,7 +406,7 @@ def determine_optimizer_automatically(space: SearchSpace | Pipeline) -> str: if isinstance(space, Pipeline): if space.fidelity_attrs: return "neps_priorband" - return "neps_complex_random_search" + return "complex_random_search" has_prior = any( parameter.prior is not None for parameter in space.searchables.values() ) @@ -427,7 +430,7 @@ def random_search( *, use_priors: bool = False, ignore_fidelity: bool | Literal["highest fidelity"] = False, -) -> RandomSearch: +) -> RandomSearch | NePSRandomSearch: """A simple random search algorithm that samples configurations uniformly at random. 
You may also `use_priors=` to sample from a distribution centered around your defined @@ -444,9 +447,8 @@ def random_search( if converted_space is not None: pipeline_space = converted_space else: - raise ValueError( - "This optimizer only supports HPO search spaces, please use a NePS" - " space-compatible optimizer." + return neps_random_search( + pipeline_space, use_priors=use_priors, ignore_fidelity=ignore_fidelity ) assert ignore_fidelity in ( True, @@ -1050,7 +1052,7 @@ def priorband( sample_prior_first: bool | Literal["highest_fidelity"] = False, base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", bayesian_optimization_kick_in_point: int | float | None = None, -) -> BracketOptimizer: +) -> BracketOptimizer | _NePSBracketOptimizer: """Priorband is also a bandit-based optimization algorithm that uses a _fidelity_, providing a general purpose sampling extension to other algorithms. It makes better use of the prior information you provide in the search space along with the fact @@ -1093,9 +1095,16 @@ def priorband( if converted_space is not None: pipeline_space = converted_space else: - raise ValueError( - "This optimizer only supports HPO search spaces, please use a NePS" - " space-compatible optimizer." + if bayesian_optimization_kick_in_point is not None: + raise ValueError( + "The priorband variant for this complex search space does not" + " support a bayesian optimization kick-in point yet." 
+                )
+            return neps_priorband(
+                pipeline_space,
+                eta=eta,
+                sample_prior_first=sample_prior_first,
+                base=base,
             )
     if all(parameter.prior is None for parameter in pipeline_space.searchables.values()):
         logger.warning(
@@ -1332,36 +1341,92 @@ def custom(
     )
 
 
-def neps_complex_random_search(
-    pipeline: Pipeline,
-    *_args: Any,
-    **_kwargs: Any,
+def complex_random_search(
+    pipeline_space: Pipeline,
+    *,
+    ignore_fidelity: bool | Literal["highest fidelity"] = False,
 ) -> NePSComplexRandomSearch:
     """A complex random search algorithm that samples configurations uniformly
     at random, but allows for more complex sampling strategies.
 
     Args:
         pipeline: The search space to sample from.
+        ignore_fidelity: Whether to ignore the fidelity parameter when sampling.
+            If `True`, the algorithm will sample the fidelity like a normal parameter.
+            If set to `"highest fidelity"`, it will always sample at the highest fidelity.
+    Raises:
+        ValueError: If the pipeline has fidelity attributes and `ignore_fidelity` is
+            set to `False`. Complex random search does not support fidelities by default.
     """
+    if pipeline_space.fidelity_attrs and ignore_fidelity is False:
+        raise ValueError(
+            "Complex Random Search does not support fidelities by default. "
+            "Consider using `ignore_fidelity=True` or `highest fidelity` "
+            "to always sample at max fidelity."
+        )
+    if not pipeline_space.fidelity_attrs and ignore_fidelity is not False:
+        logger.warning(
+            "You are using ignore_fidelity, but no fidelity is defined in the"
+            " search space. Consider setting ignore_fidelity to False."
+        )
+
     return NePSComplexRandomSearch(
-        pipeline=pipeline,
+        pipeline=pipeline_space,
+        ignore_fidelity=ignore_fidelity,
     )
 
 
 def neps_random_search(
-    pipeline: Pipeline,
-    *_args: Any,
-    **_kwargs: Any,
+    pipeline_space: Pipeline,
+    *,
+    use_priors: bool = False,
+    ignore_fidelity: bool | Literal["highest fidelity"] = False,
 ) -> NePSRandomSearch:
     """A simple random search algorithm that samples configurations uniformly at random.
     Args:
-        pipeline: The search space to sample from.
+        pipeline_space: The search space to sample from.
+        use_priors: Whether to use priors when sampling.
+            If `True`, the algorithm will sample from the prior distribution
+            defined in the search space.
+        ignore_fidelity: Whether to ignore the fidelity parameter when sampling.
+            If `True`, the algorithm will sample the fidelity like a normal parameter.
+            If set to `"highest fidelity"`, it will always sample at the highest fidelity.
+    Raises:
+        ValueError: If the pipeline space has fidelity attributes and `ignore_fidelity` is
+            set to `False`. Random search does not support fidelities by default.
     """
+    if pipeline_space.fidelity_attrs and ignore_fidelity is False:
+        raise ValueError(
+            "Random Search does not support fidelities by default. "
+            "Consider using `ignore_fidelity=True` or `highest fidelity` "
+            "to always sample at max fidelity."
+        )
+    if not pipeline_space.fidelity_attrs and ignore_fidelity is not False:
+        logger.warning(
+            "You are using ignore_fidelity, but no fidelity is defined in the"
+            " search space. Consider setting ignore_fidelity to False."
+        )
+    parameters = pipeline_space.get_attrs().values()
+    non_fid_parameters = [
+        parameter
+        for parameter in parameters
+        if parameter not in pipeline_space.fidelity_attrs.values()
+    ]
+    if use_priors and not any(
+        parameter.has_prior  # type: ignore
+        for parameter in non_fid_parameters
+        if isinstance(parameter, Resolvable)
+    ):
+        raise ValueError(
+            "You have set use_priors=True, but no priors are defined in the search space. "
+            "Consider using a different optimizer that supports priors."
+ ) + return NePSRandomSearch( - pipeline=pipeline, + pipeline=pipeline_space, use_priors=use_priors, ignore_fidelity=ignore_fidelity ) @@ -1471,7 +1536,7 @@ def _neps_bracket_optimizer( def neps_priorband( - space: Pipeline, + pipeline_space: Pipeline, *, eta: int = 3, sample_prior_first: bool | Literal["highest_fidelity"] = False, @@ -1480,7 +1545,7 @@ def neps_priorband( """Create a PriorBand optimizer for the given pipeline space. Args: - space: The pipeline space to optimize over. + pipeline_space: The pipeline space to optimize over. eta: The eta parameter for the algorithm. sample_prior_first: Whether to sample the prior first. If set to `"highest_fidelity"`, the prior will be sampled at the @@ -1493,8 +1558,23 @@ def neps_priorband( Returns: An instance of _BracketOptimizer configured for PriorBand sampling. """ + parameters = pipeline_space.get_attrs().values() + non_fid_parameters = [ + parameter + for parameter in parameters + if parameter not in pipeline_space.fidelity_attrs.values() + ] + if not any( + parameter.has_prior # type: ignore + for parameter in non_fid_parameters + if isinstance(parameter, Resolvable) + ): + logger.warning( + "Warning: No priors are defined in the search space, priorband will sample" + " uniformly. Consider using hyperband instead." 
+ ) return _neps_bracket_optimizer( - pipeline_space=space, + pipeline_space=pipeline_space, bracket_type=base, eta=eta, sampler="priorband", @@ -1524,7 +1604,7 @@ def neps_priorband( async_hb, priorband, neps_random_search, - neps_complex_random_search, + complex_random_search, neps_priorband, ) } @@ -1543,6 +1623,6 @@ def neps_priorband( "grid_search", "ifbo", "neps_random_search", - "neps_complex_random_search", + "complex_random_search", "neps_priorband", ] diff --git a/neps/optimizers/neps_bracket_optimizer.py b/neps/optimizers/neps_bracket_optimizer.py index 6ca237acf..46d3cc9d5 100644 --- a/neps/optimizers/neps_bracket_optimizer.py +++ b/neps/optimizers/neps_bracket_optimizer.py @@ -150,7 +150,7 @@ def _sample_prior( # TODO: [lum] have a CenterSampler as fallback, not Random _try_always_priors_sampler = PriorOrFallbackSampler( fallback_sampler=RandomSampler(predefined_samplings={}), - prior_use_probability=1, + always_use_prior=True, ) _environment_values = {} diff --git a/neps/optimizers/neps_priorband.py b/neps/optimizers/neps_priorband.py index 874ecfa0f..5d82a9432 100644 --- a/neps/optimizers/neps_priorband.py +++ b/neps/optimizers/neps_priorband.py @@ -151,7 +151,7 @@ def _sample_prior(self) -> dict[str, Any]: fallback_sampler=neps.space.neps_spaces.sampling.RandomSampler( predefined_samplings={} ), - prior_use_probability=1, + always_use_prior=True, ) ) diff --git a/neps/optimizers/neps_random_search.py b/neps/optimizers/neps_random_search.py index 3b6904268..03b1a729f 100644 --- a/neps/optimizers/neps_random_search.py +++ b/neps/optimizers/neps_random_search.py @@ -8,9 +8,10 @@ import random from collections.abc import Mapping from dataclasses import dataclass -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Literal from neps.space.neps_spaces.neps_space import _prepare_sampled_configs, resolve +from neps.space.neps_spaces.parameters import Float, Integer from neps.space.neps_spaces.sampling import ( CrossoverByMixingSampler, 
CrossoverNotPossibleError, @@ -40,7 +41,12 @@ class NePSRandomSearch: ValueError: If the pipeline is not a Pipeline object. """ - def __init__(self, pipeline: Pipeline): + def __init__( + self, + pipeline: Pipeline, + use_priors: bool = False, # noqa: FBT001, FBT002 + ignore_fidelity: bool | Literal["highest fidelity"] = False, # noqa: FBT002 + ): """Initialize the RandomSearch optimizer with a pipeline. Args: @@ -54,9 +60,31 @@ def __init__(self, pipeline: Pipeline): self._environment_values = {} fidelity_attrs = self._pipeline.fidelity_attrs for fidelity_name, fidelity_obj in fidelity_attrs.items(): - self._environment_values[fidelity_name] = fidelity_obj.max_value + if ignore_fidelity == "highest fidelity": + self._environment_values[fidelity_name] = fidelity_obj.max_value + elif not ignore_fidelity: + raise ValueError( + "RandomSearch does not support fidelities by default. Consider using" + " a different optimizer or setting `ignore_fidelity=True` or `highest" + " fidelity`." + ) + # Sample randomly from the fidelity bounds. 
+ elif isinstance(fidelity_obj._domain, Integer): + assert isinstance(fidelity_obj.min_value, int) + assert isinstance(fidelity_obj.max_value, int) + self._environment_values[fidelity_name] = random.randint( + fidelity_obj.min_value, fidelity_obj.max_value + ) + elif isinstance(fidelity_obj._domain, Float): + self._environment_values[fidelity_name] = random.uniform( + fidelity_obj.min_value, fidelity_obj.max_value + ) self._random_sampler = RandomSampler(predefined_samplings={}) + self.use_prior = use_priors + self._prior_sampler = PriorOrFallbackSampler( + fallback_sampler=self._random_sampler + ) def __call__( self, @@ -86,14 +114,24 @@ def __call__( n_requested = 1 if n is None else n return_single = n is None - chosen_pipelines = [ - resolve( - pipeline=self._pipeline, - domain_sampler=self._random_sampler, - environment_values=self._environment_values, - ) - for _ in range(n_requested) - ] + if self.use_prior: + chosen_pipelines = [ + resolve( + pipeline=self._pipeline, + domain_sampler=self._prior_sampler, + environment_values=self._environment_values, + ) + for _ in range(n_requested) + ] + else: + chosen_pipelines = [ + resolve( + pipeline=self._pipeline, + domain_sampler=self._random_sampler, + environment_values=self._environment_values, + ) + for _ in range(n_requested) + ] return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) @@ -111,7 +149,11 @@ class NePSComplexRandomSearch: ValueError: If the pipeline is not a Pipeline object. """ - def __init__(self, pipeline: Pipeline): + def __init__( + self, + pipeline: Pipeline, + ignore_fidelity: bool | Literal["highest fidelity"] = False, # noqa: FBT002 + ): """Initialize the ComplexRandomSearch optimizer with a pipeline. 
Args: @@ -125,19 +167,37 @@ def __init__(self, pipeline: Pipeline): self._environment_values = {} fidelity_attrs = self._pipeline.fidelity_attrs for fidelity_name, fidelity_obj in fidelity_attrs.items(): - self._environment_values[fidelity_name] = fidelity_obj.max_value + if ignore_fidelity == "highest fidelity": + self._environment_values[fidelity_name] = fidelity_obj.max_value + elif not ignore_fidelity: + raise ValueError( + "ComplexRandomSearch does not support fidelities by default. Consider" + " using a different optimizer or setting `ignore_fidelity=True` or" + " `highest fidelity`." + ) + # Sample randomly from the fidelity bounds. + elif isinstance(fidelity_obj._domain, Integer): + assert isinstance(fidelity_obj.min_value, int) + assert isinstance(fidelity_obj.max_value, int) + self._environment_values[fidelity_name] = random.randint( + fidelity_obj.min_value, fidelity_obj.max_value + ) + elif isinstance(fidelity_obj._domain, Float): + self._environment_values[fidelity_name] = random.uniform( + fidelity_obj.min_value, fidelity_obj.max_value + ) self._random_sampler = RandomSampler( predefined_samplings={}, ) self._try_always_priors_sampler = PriorOrFallbackSampler( fallback_sampler=self._random_sampler, - prior_use_probability=1, + always_use_prior=True, ) self._sometimes_priors_sampler = PriorOrFallbackSampler( - fallback_sampler=self._random_sampler, - prior_use_probability=0.1, + fallback_sampler=self._random_sampler ) + self._n_top_trials = 5 def __call__( self, @@ -199,9 +259,9 @@ def __call__( ) ) if len(successful_trials) > 0: - n_top_trials = 5 + self._n_top_trials = 5 top_trials = heapq.nsmallest( - n_top_trials, + self._n_top_trials, successful_trials, key=lambda trial: ( float(trial.report.objective_to_minimize) @@ -209,7 +269,7 @@ def __call__( and isinstance(trial.report.objective_to_minimize, float) else float("inf") ), - ) # Will have up to `n_top_trials` items. + ) # Will have up to `self._n_top_trials` items. # Do some mutations. 
for top_trial in top_trials: diff --git a/neps/sampling/priors.py b/neps/sampling/priors.py index 8e425f3f8..a8ee364ed 100644 --- a/neps/sampling/priors.py +++ b/neps/sampling/priors.py @@ -28,6 +28,8 @@ if TYPE_CHECKING: from torch.distributions import Distribution +PRIOR_CONFIDENCE_MAPPING = {"low": 0.25, "medium": 0.5, "high": 0.75} + class Prior(Sampler): """A protocol for priors over search spaces. @@ -128,7 +130,7 @@ def from_parameters( """Create a prior distribution from dict of parameters. Args: - parameters: The parameters to createa a prior from. Will look + parameters: The parameters to create a prior from. Will look at the `.prior` and `.prior_confidence` of the parameters to create a truncated normal. @@ -144,7 +146,7 @@ def from_parameters( Returns: The prior distribution """ - _mapping = {"low": 0.25, "medium": 0.5, "high": 0.75} + _mapping = PRIOR_CONFIDENCE_MAPPING center_values = center_values or {} confidence_values = confidence_values or {} diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 7261f67ec..017a845e1 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -8,15 +8,11 @@ import dataclasses import functools from collections.abc import Callable, Generator, Mapping -from typing import ( - TYPE_CHECKING, - Any, - TypeVar, - cast, -) +from functools import partial +from typing import TYPE_CHECKING, Any, Concatenate, Literal, TypeVar, cast import neps -from neps.optimizers import optimizer +from neps.optimizers import algorithms, optimizer from neps.space.neps_spaces import config_string from neps.space.neps_spaces.parameters import ( Categorical, @@ -989,3 +985,121 @@ def convert_neps_to_classic_search_space(space: Pipeline) -> SearchSpace | None: classic_space[key] = neps.HPOConstant(value) return convert_mapping(classic_space) return None + + +def check_neps_space_compatibility( + optimizer_to_check: ( + algorithms.OptimizerChoice + | Mapping[str, Any] + 
| tuple[algorithms.OptimizerChoice, Mapping[str, Any]] + | Callable[ + Concatenate[SearchSpace, ...], optimizer.AskFunction + ] # Hack, while we transit + | Callable[ + Concatenate[Pipeline, ...], optimizer.AskFunction + ] # from SearchSpace to + | Callable[ + Concatenate[SearchSpace | Pipeline, ...], optimizer.AskFunction + ] # Pipeline + | algorithms.CustomOptimizer + | Literal["auto"] + ) = "auto", +) -> Literal["neps", "classic", "both"]: + """Check if the given optimizer is compatible with a NePS space. + This function checks if the optimizer is a NePS-specific algorithm, + a classic algorithm, or a combination of both. + + Args: + optimizer_to_check: The optimizer to check for compatibility. + It can be a NePS-specific algorithm, a classic algorithm, + or a combination of both. + + Returns: + A string indicating the compatibility: + - "neps" if the optimizer is a NePS-specific algorithm, + - "classic" if the optimizer is a classic algorithm, + - "both" if the optimizer is a combination of both. 
+ """ + inner_optimizer = None + if isinstance(optimizer_to_check, partial): + inner_optimizer = optimizer_to_check.func + while isinstance(inner_optimizer, partial): + inner_optimizer = inner_optimizer.func + + only_neps_algorithm = ( + optimizer_to_check + in ( + algorithms.neps_random_search, + algorithms.neps_priorband, + algorithms.complex_random_search, + ) + or ( + inner_optimizer + and inner_optimizer + in ( + algorithms.neps_random_search, + algorithms.neps_priorband, + algorithms.complex_random_search, + ) + ) + or optimizer_to_check == "auto" + or ( + optimizer_to_check[0] + in ( + "neps_random_search", + "neps_priorband", + "complex_random_search", + ) + if isinstance(optimizer_to_check, tuple) + else False + ) + or ( + optimizer_to_check + in ( + "neps_random_search", + "neps_priorband", + "complex_random_search", + ) + if isinstance(optimizer_to_check, str) + else False + ) + ) + if only_neps_algorithm: + return "neps" + neps_and_classic_algorithm = ( + optimizer + in ( + algorithms.random_search, + algorithms.priorband, + ) + or ( + inner_optimizer + and inner_optimizer + in ( + algorithms.random_search, + algorithms.priorband, + ) + ) + or optimizer_to_check == "auto" + or ( + optimizer_to_check[0] + in ( + "random_search", + "priorband", + ) + if isinstance(optimizer_to_check, tuple) + else False + ) + or ( + optimizer_to_check + in ( + "random_search", + "priorband", + ) + if isinstance(optimizer_to_check, str) + else False + ) + ) + if neps_and_classic_algorithm: + return "both" + return "classic" diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index d937c7625..a83b25915 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -76,7 +76,10 @@ def __init__(self, domain: Integer | Float): """ if domain.has_prior: - raise ValueError(f"The domain of a Fidelity can not have priors: {domain!r}.") + raise ValueError( + "The domain of a Fidelity can not have priors, has 
prior:" + f" {domain.prior!r}." + ) self._domain = domain @property @@ -866,10 +869,6 @@ def sample(self) -> int: Returns: A randomly selected integer value within the domain's range. - Raises: - NotImplementedError: If the domain is set to sample on a logarithmic - scale, as this is not implemented yet. - """ if self._log: return int( diff --git a/neps/space/neps_spaces/sampling.py b/neps/space/neps_spaces/sampling.py index a0ed14704..6b4df10e0 100644 --- a/neps/space/neps_spaces/sampling.py +++ b/neps/space/neps_spaces/sampling.py @@ -8,9 +8,15 @@ from collections.abc import Mapping from typing import Any, Protocol, TypeVar, cast, runtime_checkable +from scipy import stats + +from neps.sampling.priors import PRIOR_CONFIDENCE_MAPPING from neps.space.neps_spaces.parameters import ( + Categorical, ConfidenceLevel, Domain, + Float, + Integer, Pipeline, ) @@ -145,38 +151,23 @@ class PriorOrFallbackSampler(DomainSampler): Args: fallback_sampler: A DomainSampler to use if the prior is not available. - prior_use_probability: The probability of using the prior value when - available. This should be a float between 0 and 1, where 0 means never use - the prior and 1 means always use it. - - Raises: - ValueError: If the prior_use_probability is not between 0 and 1. + always_use_prior: If True, always use the prior value when available. """ def __init__( self, fallback_sampler: DomainSampler, - prior_use_probability: float, + always_use_prior: bool = False, # noqa: FBT001, FBT002 ): - """Initialize the sampler with a fallback sampler and a prior use probability. + """Initialize the sampler with a fallback sampler and a flag to always use the + prior. Args: fallback_sampler: A DomainSampler to use if the prior is not available. - prior_use_probability: The probability of using the prior value when - available. This should be a float between 0 and 1, where 0 means never - use the prior and 1 means always use it. 
- - Raises: - ValueError: If the prior_use_probability is not between 0 and 1. + always_use_prior: If True, always use the prior value when available. """ - if not 0 <= prior_use_probability <= 1: - raise ValueError( - "The given `prior_use_probability` value is out of range:" - f" {prior_use_probability!r}." - ) - self._fallback_sampler = fallback_sampler - self._prior_use_probability = prior_use_probability + self._always_use_prior = always_use_prior def __call__( self, @@ -185,7 +176,7 @@ def __call__( current_path: str, ) -> T: """Sample a value from the domain, using the prior if available and according to - the prior use probability. + the prior confidence probability. Args: domain_obj: The domain object from which to sample. @@ -198,13 +189,52 @@ def __call__( ValueError: If the domain does not have a prior defined and the fallback sampler is not provided. """ - use_prior = random.choices( - (True, False), - weights=(self._prior_use_probability, 1 - self._prior_use_probability), - k=1, - )[0] - if domain_obj.has_prior and use_prior: - return domain_obj.prior + if domain_obj.has_prior: + _prior_probability = PRIOR_CONFIDENCE_MAPPING.get( + domain_obj.prior_confidence.value, 0.5 + ) + if isinstance(domain_obj, Categorical) or self._always_use_prior: + if ( + random.choices( + (True, False), + weights=(_prior_probability, 1 - _prior_probability), + k=1, + )[0] + or self._always_use_prior + ): + # If the prior is defined, we sample from it. + return domain_obj.prior + + # For Integers and Floats, sample gaussians around the prior + + elif isinstance(domain_obj, Integer | Float): + # Sample an integer from a Gaussian distribution centered around the + # prior, cut of the tails to ensure the value is within the domain's + # range. 
Using the _prior_probability to determine the standard deviation + assert hasattr(domain_obj, "min_value") + assert hasattr(domain_obj, "max_value") + assert hasattr(domain_obj, "prior") + + std_dev = 1 / ( + 10 + * _prior_probability + / (domain_obj.max_value - domain_obj.min_value) # type: ignore + ) + + a = (domain_obj.min_value - domain_obj.prior) / std_dev # type: ignore + b = (domain_obj.max_value - domain_obj.prior) / std_dev # type: ignore + sampled_value = stats.truncnorm.rvs( + a=a, + b=b, + loc=domain_obj.prior, # type: ignore + scale=std_dev, + ) + if isinstance(domain_obj, Integer): + sampled_value = int(round(sampled_value)) + else: + sampled_value = float(sampled_value) # type: ignore + return cast(T, sampled_value) + return self._fallback_sampler( domain_obj=domain_obj, current_path=current_path, diff --git a/neps/state/neps_state.py b/neps/state/neps_state.py index 0e684a428..6b5d8f21b 100644 --- a/neps/state/neps_state.py +++ b/neps/state/neps_state.py @@ -653,7 +653,7 @@ def _deserialize_optimizer_info(path: Path) -> OptimizerInfo: deserialized = deserialize(path) if "name" not in deserialized or "info" not in deserialized: raise NePSError( - f"Invalid optimizer info deserialized from" + "Invalid optimizer info deserialized from" f" {path}. Did not find" " keys 'name' and 'info'." 
) diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index 263b69724..95cb10227 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -1,12 +1,13 @@ from __future__ import annotations from collections.abc import Callable, Sequence +from functools import partial import pytest import neps import neps.optimizers -import neps.optimizers.algorithms +from neps.optimizers import algorithms from neps.space.neps_spaces.parameters import ( Categorical, ConfidenceLevel, @@ -155,13 +156,13 @@ class DemoHyperparameterComplexSpace(Pipeline): @pytest.mark.parametrize( "optimizer", [ - neps.optimizers.algorithms.neps_random_search, - neps.optimizers.algorithms.neps_complex_random_search, + partial(algorithms.neps_random_search, ignore_fidelity=True), + partial(algorithms.complex_random_search, ignore_fidelity=True), ], ) def test_hyperparameter_demo(optimizer): pipeline_space = DemoHyperparameterSpace() - root_directory = f"results/hyperparameter_demo__{optimizer.__name__}" + root_directory = f"results/hyperparameter_demo__{optimizer.func.__name__}" neps.run( evaluate_pipeline=hyperparameter_pipeline_to_optimize, @@ -178,13 +179,15 @@ def test_hyperparameter_demo(optimizer): @pytest.mark.parametrize( "optimizer", [ - neps.optimizers.algorithms.neps_random_search, - neps.optimizers.algorithms.neps_complex_random_search, + partial(algorithms.neps_random_search, ignore_fidelity=True), + partial(algorithms.complex_random_search, ignore_fidelity=True), ], ) def test_hyperparameter_with_fidelity_demo(optimizer): pipeline_space = DemoHyperparameterWithFidelitySpace() - root_directory = f"results/hyperparameter_with_fidelity_demo__{optimizer.__name__}" + root_directory = ( + f"results/hyperparameter_with_fidelity_demo__{optimizer.func.__name__}" + ) neps.run( evaluate_pipeline=hyperparameter_pipeline_to_optimize, @@ -201,13 +204,13 @@ def 
test_hyperparameter_with_fidelity_demo(optimizer): @pytest.mark.parametrize( "optimizer", [ - neps.optimizers.algorithms.neps_random_search, - neps.optimizers.algorithms.neps_complex_random_search, + partial(algorithms.neps_random_search, ignore_fidelity=True), + partial(algorithms.complex_random_search, ignore_fidelity=True), ], ) def test_hyperparameter_complex_demo(optimizer): pipeline_space = DemoHyperparameterComplexSpace() - root_directory = f"results/hyperparameter_complex_demo__{optimizer.__name__}" + root_directory = f"results/hyperparameter_complex_demo__{optimizer.func.__name__}" neps.run( evaluate_pipeline=hyperparameter_pipeline_to_optimize, @@ -327,7 +330,7 @@ class DemoOperationSpace(Pipeline): "optimizer", [ neps.optimizers.algorithms.neps_random_search, - neps.optimizers.algorithms.neps_complex_random_search, + neps.optimizers.algorithms.complex_random_search, ], ) def test_operation_demo(optimizer): diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index ab014490e..9292be0d7 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -65,11 +65,11 @@ class DemoHyperparameterWithFidelitySpace(Pipeline): ("optimizer", "optimizer_name"), [ ( - algorithms.neps_random_search, + partial(algorithms.neps_random_search, ignore_fidelity=True), "new__RandomSearch", ), ( - algorithms.neps_complex_random_search, + partial(algorithms.complex_random_search, ignore_fidelity=True), "new__ComplexRandomSearch", ), ( diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index 4842eaabd..40ea2aba6 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -52,11 +52,11 @@ class 
DemoHyperparameterWithFidelitySpace(Pipeline): ("optimizer", "optimizer_name"), [ ( - algorithms.neps_random_search, + partial(algorithms.neps_random_search, ignore_fidelity=True), "new__RandomSearch", ), ( - algorithms.neps_complex_random_search, + partial(algorithms.complex_random_search, ignore_fidelity=True), "new__ComplexRandomSearch", ), ( diff --git a/tests/test_neps_space/test_search_space__fidelity.py b/tests/test_neps_space/test_search_space__fidelity.py index 8a76a2f73..903253f9a 100644 --- a/tests/test_neps_space/test_search_space__fidelity.py +++ b/tests/test_neps_space/test_search_space__fidelity.py @@ -35,7 +35,7 @@ def test_fidelity_creation_raises_when_domain_has_prior(): # Creating a fidelity object with a domain that has a prior should not be possible. with pytest.raises( ValueError, - match=re.escape("The domain of a Fidelity can not have priors: "), + match=re.escape("The domain of a Fidelity can not have priors, has prior: 10"), ): Fidelity( domain=Integer( diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index 0189e38e2..fd64c43be 100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -57,10 +57,10 @@ class SpaceFid(Pipeline): @case def case_search_space_no_fid_with_prior() -> Pipeline: class SpacePrior(Pipeline): - a = Float(0, 1, prior=0.5) - b = Categorical(("a", "b", "c"), prior=0) + a = Float(0, 1, prior=0.5, prior_confidence="medium") + b = Categorical(("a", "b", "c"), prior=0, prior_confidence="medium") c = "a" - d = Integer(0, 10, prior=5) + d = Integer(0, 10, prior=5, prior_confidence="medium") return SpacePrior() @@ -68,10 +68,10 @@ class SpacePrior(Pipeline): @case def case_search_space_fid_with_prior() -> Pipeline: class SpaceFidPrior(Pipeline): - a = Float(0, 1, prior=0.5) - b = Categorical(("a", "b", "c"), prior=0) + a = Float(0, 1, prior=0.5, prior_confidence="medium") + b = Categorical(("a", "b", "c"), prior=0, prior_confidence="medium") c = "a" - d = 
Integer(0, 10, prior=5) + d = Integer(0, 10, prior=5, prior_confidence="medium") e = Fidelity(Integer(1, 10)) return SpaceFidPrior() @@ -103,6 +103,8 @@ class SpaceFidPrior(Pipeline): "grid_search", "bayesian_optimization", "pibo", + "neps_random_search", + "complex_random_search", ] NO_DEFAULT_PRIOR_SUPPORT = [ "grid_search", @@ -115,6 +117,8 @@ class SpaceFidPrior(Pipeline): "random_search", "moasha", "mo_hyperband", + "neps_random_search", + "complex_random_search", ] REQUIRES_PRIOR = [ "pibo", @@ -124,7 +128,7 @@ class SpaceFidPrior(Pipeline): REQUIRES_NEPS_SPACE = [ "neps_priorband", "neps_random_search", - "neps_complex_random_search", + "complex_random_search", ] From 37e3a89f74ce22331d90fb3126971f79a6219ef8 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 10 Jul 2025 23:20:52 +0200 Subject: [PATCH 034/156] Fix variable name in check_neps_space_compatibility function --- neps/space/neps_spaces/neps_space.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 017a845e1..d4712ddbe 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -1067,7 +1067,7 @@ def check_neps_space_compatibility( if only_neps_algorithm: return "neps" neps_and_classic_algorithm = ( - optimizer + optimizer_to_check in ( algorithms.random_search, algorithms.priorband, From b0b8e05e8f9a3c7e356a1c47b8d58eb4f43c4d0c Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 10 Jul 2025 23:36:42 +0200 Subject: [PATCH 035/156] Enhance status function to support NePS-only optimizers; integrate NepsCompatConverter for better pipeline configuration handling. 
--- neps/status/status.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/neps/status/status.py b/neps/status/status.py index bafb39c45..e99cbc53c 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -14,6 +14,8 @@ from neps.runtime import get_workers_neps_state from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces.neps_space import NepsCompatConverter +from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler from neps.state.neps_state import FileLocker, NePSState from neps.state.trial import State, Trial @@ -132,11 +134,20 @@ def formatted( if pipeline_space_variables is None: best_summary += f"\n config: {best_trial.config}" else: + best_config_resolve = NepsCompatConverter().from_neps_config( + best_trial.config + ) pipeline_configs = [ neps_space.config_string.ConfigString( neps_space.convert_operation_to_string( getattr( - neps_space.resolve(pipeline_space_variables[0])[0], + neps_space.resolve( + pipeline_space_variables[0], + OnlyPredefinedValuesSampler( + best_config_resolve.predefined_samplings + ), + environment_values=best_config_resolve.environment_values, + )[0], variable, ) ) @@ -230,6 +241,17 @@ def status( for pipelines that have a complex configuration structure, allowing for a more readable output. + !!! Warning: + + This is only supported when using NePS-only optimizers, such as + `neps.algorithms.neps_random_search`, + `neps.algorithms.complex_random_search` + or `neps.algorithms.neps_priorband`. When the search space is + simple enough, using `neps.algorithms.random_search` or + `neps.algorithms.priorband` is not enough, as it will be transformed to a + simpler HPO framework, which is incompatible with the + `pipeline_space_variables` argument. + Returns: Dataframe of full results and short summary series. 
""" From d467011fcad94fb78d93073ebf856cc3f5a11721 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 10 Jul 2025 23:40:28 +0200 Subject: [PATCH 036/156] Enhance formatted summary method documentation to clarify usage of pipeline_space_variables and NePS-only optimizers. --- neps/status/status.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/neps/status/status.py b/neps/status/status.py index e99cbc53c..06e1371b8 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -109,7 +109,28 @@ def num_pending(self) -> int: def formatted( self, pipeline_space_variables: tuple[Pipeline, list[str]] | None = None ) -> str: - """Return a formatted string of the summary.""" + """Return a formatted string of the summary. + + Args: + pipeline_space_variables: If provided, this tuple contains the Pipeline and a + list of variable names to format the config in the summary. This is useful + for pipelines that have a complex configuration structure, allowing for a + more readable output. + + !!! Warning: + + This is only supported when using NePS-only optimizers, such as + `neps.algorithms.neps_random_search`, + `neps.algorithms.complex_random_search` + or `neps.algorithms.neps_priorband`. When the search space is + simple enough, using `neps.algorithms.random_search` or + `neps.algorithms.priorband` is not enough, as it will be transformed + to a simpler HPO framework, which is incompatible with the + `pipeline_space_variables` argument. + + Returns: + A formatted string of the summary. 
+ """ state_summary = "\n".join( f" {state.name.lower()}: {len(trials)}" for state, trials in self.by_state.items() From fbf89b35f72a59180e8b34abd109c60f159e8541 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 10 Jul 2025 23:55:18 +0200 Subject: [PATCH 037/156] Add warnings for warmstarting compatibility with NEPS optimizers in run and warmstart_neps functions --- neps/api.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/neps/api.py b/neps/api.py index ee92f846f..10b4fea16 100644 --- a/neps/api.py +++ b/neps/api.py @@ -319,6 +319,12 @@ def __call__( function, i.e. the objective value to minimize or a dictionary with `"objective_to_minimize"` and `"cost"` keys. + !!! warning "Warmstarting compatibility" + + The warmstarting feature is only compatible with the new NEPS optimizers, + such as `neps.algorithms.neps_random_search`, `neps.algorithms.neps_priorband`, + and `neps.algorithms.complex_random_search`. + """ # noqa: E501 if ( max_evaluations_total is None @@ -469,6 +475,13 @@ def warmstart_neps( optimizer: The optimizer to use for the warmstart. This can be a string, a callable, or a tuple of a callable and a dictionary of parameters. If "auto", the optimizer will be chosen based on the pipeline space. + + !!! warning "Warmstarting compatibility" + + The warmstarting feature is only compatible with the new NEPS optimizers, + such as `neps.algorithms.neps_random_search`, + `neps.algorithms.neps_priorband`, and + `neps.algorithms.complex_random_search`. """ logger.info( "Warmstarting neps.run with the provided" @@ -498,14 +511,10 @@ def warmstart_neps( ) ask_tell = AskAndTell(optimizer=optimizer_ask, worker_id="warmstart_worker") - if pipeline_space.fidelity_attrs: - assert isinstance( - optimizer_ask, - neps.optimizers.neps_bracket_optimizer._NePSBracketOptimizer, - ), ( - "The optimizer must be a NePSBracketOptimizer when using fidelity" - " attributes." 
- ) + if pipeline_space.fidelity_attrs and isinstance( + optimizer_ask, + neps.optimizers.neps_bracket_optimizer._NePSBracketOptimizer, + ): rung_to_fid = optimizer_ask.rung_to_fid fid_to_rung = { v: max(k for k, val in rung_to_fid.items() if val == v) From 63a6661a3a7426c6e19b1713a7f2a4dfdac13a9b Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 11 Jul 2025 00:20:50 +0200 Subject: [PATCH 038/156] Add compatibility check for warmstarting in warmstart_neps function and enhance parameter filtering in neps_random_search and neps_priorband functions --- neps/api.py | 6 ++++++ neps/optimizers/algorithms.py | 10 +++++++++- neps/space/neps_spaces/neps_space.py | 6 +++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/neps/api.py b/neps/api.py index 10b4fea16..084defbea 100644 --- a/neps/api.py +++ b/neps/api.py @@ -483,6 +483,12 @@ def warmstart_neps( `neps.algorithms.neps_priorband`, and `neps.algorithms.complex_random_search`. """ + if check_neps_space_compatibility(optimizer) != "neps": + raise ValueError( + "The provided optimizer is not compatible with the warmstarting feature. " + "Please use one that is, such as 'neps_random_search', 'neps_priorband', " + "or 'complex_random_search'." 
+ ) logger.info( "Warmstarting neps.run with the provided" f" {len(warmstart_configs)} configurations using root directory" diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 2ef2d27ae..a6613f489 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -46,7 +46,13 @@ from neps.sampling import Prior, Sampler, Uniform from neps.space.encoding import CategoricalToUnitNorm, ConfigEncoder from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space -from neps.space.neps_spaces.parameters import Pipeline, Resolvable +from neps.space.neps_spaces.parameters import ( + Categorical, + Float, + Integer, + Pipeline, + Resolvable, +) if TYPE_CHECKING: import pandas as pd @@ -1419,6 +1425,7 @@ def neps_random_search( parameter.has_prior # type: ignore for parameter in non_fid_parameters if isinstance(parameter, Resolvable) + and isinstance(parameter, Integer | Float | Categorical) ): raise ValueError( "You have set use_priors=True, but no priors are defined in the search space." @@ -1563,6 +1570,7 @@ def neps_priorband( parameter for parameter in parameters if parameter not in pipeline_space.fidelity_attrs.values() + and isinstance(parameter, Integer | Float | Categorical) ] if not any( parameter.has_prior # type: ignore diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index d4712ddbe..99b874f3e 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -895,7 +895,11 @@ def inner(*args: Any, **kwargs: Any) -> Any: for name, value in config.items(): if isinstance(value, Operation): - config[name] = operation_converter(value) + # If the operator is a not a string, we convert it to a callable. 
+ if not isinstance(value.operator, str): + config[name] = value.operator + else: + config[name] = operation_converter(value) # So that we still pass the kwargs not related to the config, # start with the extra kwargs we passed to the converter. From f8b560a69634133bbad3328a96855e44dc1596d3 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 11 Jul 2025 16:01:43 +0200 Subject: [PATCH 039/156] Enhance NEPS functions to support internal runtime calls and improve pipeline configuration formatting --- neps/api.py | 13 +++- neps/space/neps_spaces/config_string.py | 40 ++++++++----- neps/space/neps_spaces/neps_space.py | 2 +- neps/status/status.py | 80 ++++++++++++++----------- 4 files changed, 83 insertions(+), 52 deletions(-) diff --git a/neps/api.py b/neps/api.py index 084defbea..a307fe8b7 100644 --- a/neps/api.py +++ b/neps/api.py @@ -348,6 +348,7 @@ def __call__( warmstart_configs=warmstart_configs, optimizer=optimizer, overwrite_working_directory=overwrite_working_directory, + inside_neps=True, ) overwrite_working_directory = False @@ -457,6 +458,7 @@ def warmstart_neps( | CustomOptimizer | Literal["auto"] ) = "auto", + inside_neps: bool = False, # noqa: FBT001, FBT002 ) -> None: """Warmstart the NePS state with given configurations. This is useful for testing and debugging purposes, where you want to @@ -482,8 +484,17 @@ def warmstart_neps( such as `neps.algorithms.neps_random_search`, `neps.algorithms.neps_priorband`, and `neps.algorithms.complex_random_search`. + inside_neps: If True, the function is called from within the NEPS runtime. + This is used to avoid checking the compatibility of the optimizer with the + warmstarting feature, as this is already done in the NEPS runtime. + If False, the function will check if the optimizer is compatible with the + warmstarting feature and raise an error if it is not. + + Raises: + ValueError: If the optimizer is not compatible with the warmstarting feature. 
+ ValueError: If the warmstart config already exists in the root directory. """ - if check_neps_space_compatibility(optimizer) != "neps": + if not inside_neps and check_neps_space_compatibility(optimizer) != "neps": raise ValueError( "The provided optimizer is not compatible with the warmstarting feature. " "Please use one that is, such as 'neps_random_search', 'neps_priorband', " diff --git a/neps/space/neps_spaces/config_string.py b/neps/space/neps_spaces/config_string.py index 62ed3ec55..395820d78 100644 --- a/neps/space/neps_spaces/config_string.py +++ b/neps/space/neps_spaces/config_string.py @@ -127,13 +127,32 @@ def wrap_config_into_string( if item.level > current_level: if item.hyperparameters not in ("{}", ""): - value = " (" + str(item.operator) + " " + item.hyperparameters + value = ( + " (" + + str( + item.operator.__name__ + if callable(item.operator) + else item.operator + ) + + " " + + item.hyperparameters + ) else: - value = " (" + str(item.operator) + value = " (" + str( + item.operator.__name__ if callable(item.operator) else item.operator + ) elif item.level < current_level: - value = ")" * (current_level - item.level + 1) + " (" + str(item.operator) + value = ( + ")" * (current_level - item.level + 1) + + " (" + + str( + item.operator.__name__ if callable(item.operator) else item.operator + ) + ) else: - value = ") (" + str(item.operator) + value = ") (" + str( + item.operator.__name__ if callable(item.operator) else item.operator + ) current_level = item.level result.append(value) result.append(")" * current_level) @@ -155,7 +174,6 @@ def wrap_config_into_string( if replace_individual: result_string = result_string.replace(f"({op})", f"{op}") result_string = result_string.replace("__TMP_PLACEHOLDER___", f"{op} {op}") - return result_string @@ -206,15 +224,9 @@ def unwrapped(self) -> tuple[UnwrappedConfigStringPart, ...]: if not unwrapped: raise ValueError(f"Error unwrapping config string: {self.config_string}") - # NOTE: slow test that can 
possibly be removed - # test that meaning was preserved between wrapping and unwrapping - # to make sure the config string wrapping/unwrapping is working well - rewrapped_config = wrap_config_into_string(unwrapped_config=unwrapped) - assert self.config_string == rewrapped_config, ( - "Error during wrapping unwrapping: config_string != rewrapped_config_string", - self.config_string, - rewrapped_config, - ) + # NOTE: Previously, here was a test that compared wrap_config_into_string + # (unwrapped_config=unwrapped) to unwrapped. As it frequently failed and was + # deemed to be unnecessary, it was removed self._unwrapped = unwrapped return self._unwrapped diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 99b874f3e..d185cbf05 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -896,7 +896,7 @@ def inner(*args: Any, **kwargs: Any) -> Any: for name, value in config.items(): if isinstance(value, Operation): # If the operator is a not a string, we convert it to a callable. - if not isinstance(value.operator, str): + if isinstance(value.operator, str): config[name] = value.operator else: config[name] = operation_converter(value) diff --git a/neps/status/status.py b/neps/status/status.py index 06e1371b8..6366bc5b8 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -106,7 +106,7 @@ def num_pending(self) -> int: """Number of trials that are pending.""" return len(self.by_state[State.PENDING]) - def formatted( + def formatted( # noqa: PLR0912, C901 self, pipeline_space_variables: tuple[Pipeline, list[str]] | None = None ) -> str: """Return a formatted string of the summary. 
@@ -158,44 +158,52 @@ def formatted( best_config_resolve = NepsCompatConverter().from_neps_config( best_trial.config ) - pipeline_configs = [ - neps_space.config_string.ConfigString( - neps_space.convert_operation_to_string( - getattr( - neps_space.resolve( - pipeline_space_variables[0], - OnlyPredefinedValuesSampler( - best_config_resolve.predefined_samplings - ), - environment_values=best_config_resolve.environment_values, - )[0], - variable, + pipeline_configs = [] + for variable in pipeline_space_variables[1]: + pipeline_configs.append( + neps_space.config_string.ConfigString( + neps_space.convert_operation_to_string( + getattr( + neps_space.resolve( + pipeline_space_variables[0], + OnlyPredefinedValuesSampler( + best_config_resolve.predefined_samplings + ), + environment_values=best_config_resolve.environment_values, + )[0], + variable, + ) ) - ) - ).pretty_format() - for variable in pipeline_space_variables[1] - ] - for pipeline_config in pipeline_configs: - # Replace literal \t and \n with actual formatting - formatted_config = pipeline_config.replace("\\t", " ").replace( - "\\n", "\n" + ).pretty_format() ) - # Add proper indentation to each line - lines = formatted_config.split("\n") - indented_lines = [] - for i, line in enumerate(lines): - if i == 0: - indented_lines.append( - line - ) # First line gets base indentation - else: - indented_lines.append( - " " + line - ) # Subsequent lines get extra indentation - - formatted_config = "\n".join(indented_lines) - best_summary += f"\n config:\n {formatted_config}" + for n_pipeline, pipeline_config in enumerate(pipeline_configs): + if isinstance(pipeline_config, str): + # Replace literal \t and \n with actual formatting + formatted_config = pipeline_config.replace("\\t", " ").replace( + "\\n", "\n" + ) + + # Add proper indentation to each line + lines = formatted_config.split("\n") + indented_lines = [] + for i, line in enumerate(lines): + if i == 0: + indented_lines.append( + line + ) # First line gets base 
indentation + else: + indented_lines.append( + " " + line + ) # Subsequent lines get extra indentation + + formatted_config = "\n".join(indented_lines) + else: + formatted_config = pipeline_config # type: ignore + best_summary += ( + f"\n config: {pipeline_space_variables[1][n_pipeline]}\n " + f" {formatted_config}" + ) best_summary += f"\n path: {best_trial.metadata.location}" From 79ae4d5742e8797dab01d767cb0da707e18587fa Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 11 Jul 2025 16:32:15 +0200 Subject: [PATCH 040/156] Add warnings for repeated warmstarting in warmstart_neps function and include location parameter in AskAndTell class for trial metadata --- neps/api.py | 11 +++++++++-- neps/optimizers/ask_and_tell.py | 6 +++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/neps/api.py b/neps/api.py index a307fe8b7..86070ab2d 100644 --- a/neps/api.py +++ b/neps/api.py @@ -472,8 +472,14 @@ def warmstart_neps( The configuration is a dictionary of parameter values, the environment values are also a dictionary, and the result is the evaluation result. overwrite_working_directory: If True, the working directory will be deleted before - starting the warmstart. This is useful for testing and debugging purposes, - where you want to start with a clean state. + starting the warmstart. + + !!! warning "Repeated warmstarting" + + When not overwriting the working directory, starting multiple NePS + instances will result in an error. Instead, use warmstart_neps once + on its own and then start the NePS instances. + optimizer: The optimizer to use for the warmstart. This can be a string, a callable, or a tuple of a callable and a dictionary of parameters. If "auto", the optimizer will be chosen based on the pipeline space. 
@@ -561,6 +567,7 @@ def warmstart_neps( config=config, result=rung_result, previous_trial_id=f"{n_config}_{rung - 1}" if rung > 0 else None, + location=root_directory / "configs" / config_path, ) trial.config = NepsCompatConverter.to_neps_config(resolution_context) if (root_directory / config_path).is_dir(): diff --git a/neps/optimizers/ask_and_tell.py b/neps/optimizers/ask_and_tell.py index a1119f00d..62193ddee 100644 --- a/neps/optimizers/ask_and_tell.py +++ b/neps/optimizers/ask_and_tell.py @@ -68,6 +68,7 @@ import time from collections.abc import Mapping from dataclasses import dataclass, field +from pathlib import Path from typing import TYPE_CHECKING, Any, Literal, overload from neps.optimizers.optimizer import AskFunction, SampledConfig @@ -175,6 +176,7 @@ def tell_custom( previous_trial_id: str | None = None, worker_id: str | None = None, traceback_str: str | None = None, + location: Path | None = None, ) -> Trial: """Report a custom configuration and result to the optimizer. @@ -202,6 +204,8 @@ def tell_custom( metadata if you need. traceback_str: The traceback of any error, only to fill in metadata if you need. + location: The location of the configuration, if any. This will be saved + in the created trial's metadata. Returns: The trial object that was created. 
You can find the report @@ -227,7 +231,7 @@ def tell_custom( # Just go through the motions of the trial life-cycle trial = Trial.new( trial_id=config_id, - location="", + location=str(location.resolve()) if location else "", config=config, previous_trial=previous_trial_id, previous_trial_location="", From 801687b4af0be202efb8b73863f8ff3165c9a01a Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 11 Jul 2025 16:56:18 +0200 Subject: [PATCH 041/156] Add error handling and logging for pipeline space resolution in warmstart_neps function --- neps/api.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/neps/api.py b/neps/api.py index 86070ab2d..ef191ff22 100644 --- a/neps/api.py +++ b/neps/api.py @@ -509,7 +509,7 @@ def warmstart_neps( logger.info( "Warmstarting neps.run with the provided" f" {len(warmstart_configs)} configurations using root directory" - f" {root_directory}." + f" {root_directory}" ) root_directory = Path(root_directory) if overwrite_working_directory and root_directory.is_dir(): @@ -525,13 +525,20 @@ def warmstart_neps( ), ) for n_config, (config, env, result) in enumerate(warmstart_configs): - _, resolution_context = neps_space.resolve( - pipeline=pipeline_space, - domain_sampler=neps_space.OnlyPredefinedValuesSampler( - predefined_samplings=config - ), - environment_values=env, - ) + try: + _, resolution_context = neps_space.resolve( + pipeline=pipeline_space, + domain_sampler=neps_space.OnlyPredefinedValuesSampler( + predefined_samplings=config + ), + environment_values=env, + ) + except ValueError as e: + logger.error( + "Failed to resolve the pipeline space with the provided config:" + f" {config} and env: {env}.", + ) + raise e ask_tell = AskAndTell(optimizer=optimizer_ask, worker_id="warmstart_worker") if pipeline_space.fidelity_attrs and isinstance( @@ -584,6 +591,9 @@ def warmstart_neps( report=trial.report, worker_id=trial.metadata.evaluating_worker_id, ) + logger.info( + f"Warmstarted config 
{config_path} with result: {rung_result}." + ) else: config_path = f"{n_config}" @@ -606,6 +616,7 @@ def warmstart_neps( report=trial.report, worker_id=trial.metadata.evaluating_worker_id, ) + logger.info(f"Warmstarted config {config_path} with result: {result}.") __all__ = ["run", "warmstart_neps"] From 10d3104292802a05a10918995bb7cee184389888 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 13 Jul 2025 16:46:45 +0200 Subject: [PATCH 042/156] Add timing parameters to warmstart_neps function for performance tracking --- neps/api.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/neps/api.py b/neps/api.py index ef191ff22..872df1270 100644 --- a/neps/api.py +++ b/neps/api.py @@ -4,6 +4,7 @@ import logging import shutil +import time import warnings from collections.abc import Callable, Mapping, Sequence from pathlib import Path @@ -573,6 +574,9 @@ def warmstart_neps( config_id=config_path, config=config, result=rung_result, + time_sampled=time.time(), + time_started=time.time(), + time_end=time.time(), previous_trial_id=f"{n_config}_{rung - 1}" if rung > 0 else None, location=root_directory / "configs" / config_path, ) @@ -601,6 +605,9 @@ def warmstart_neps( config_id=config_path, config=config, result=result, + time_sampled=time.time(), + time_started=time.time(), + time_end=time.time(), ) trial.config = NepsCompatConverter.to_neps_config(resolution_context) if (root_directory / config_path).is_dir(): From eaa66e34b7edcb757ab8dfdd3241c67673cd1de6 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 13 Jul 2025 16:53:14 +0200 Subject: [PATCH 043/156] Add location parameter to warmstart_neps function for configuration tracking --- neps/api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/neps/api.py b/neps/api.py index 872df1270..7fc0491f6 100644 --- a/neps/api.py +++ b/neps/api.py @@ -608,6 +608,7 @@ def warmstart_neps( time_sampled=time.time(), time_started=time.time(), time_end=time.time(), + location=root_directory / "configs" / config_path, ) 
trial.config = NepsCompatConverter.to_neps_config(resolution_context) if (root_directory / config_path).is_dir(): From 9547d5a7b6893d66a8d7cb0ec7c8d229660ef11e Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 13 Jul 2025 18:37:45 +0200 Subject: [PATCH 044/156] Add warning log for missing priors when use_priors is set to True in neps_random_search function --- neps/optimizers/algorithms.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index a6613f489..e0267a259 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -1427,9 +1427,8 @@ def neps_random_search( if isinstance(parameter, Resolvable) and isinstance(parameter, Integer | Float | Categorical) ): - raise ValueError( + logger.warning( "You have set use_priors=True, but no priors are defined in the search space." - "Consider using a different optimizer that supports priors." ) return NePSRandomSearch( From 88da9be1b09976c68f5ab9ed879a5df9d3131638 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 28 Jul 2025 23:52:13 +0200 Subject: [PATCH 045/156] Refactor NEPS Pipeline classes to use PipelineSpace - Updated all instances of the Pipeline class to PipelineSpace across various examples and tests. - Renamed classes for clarity, such as changing PipelineSpace to HPOSpace where applicable. - Adjusted imports and class definitions in multiple files to ensure consistency with the new structure. - Enhanced code readability by formatting and organizing parameters in the PipelineSpace classes. - Ensured that all references to the old Pipeline class are replaced to maintain functionality. 
--- docs/getting_started.md | 4 +- docs/index.md | 4 +- docs/reference/neps_run.md | 12 +- docs/reference/neps_spaces.md | 208 ++++++++++-------- docs/reference/optimizers.md | 2 +- .../search_algorithms/landing_page_algo.md | 4 +- neps/__init__.py | 4 +- neps/api.py | 26 +-- neps/optimizers/__init__.py | 10 +- neps/optimizers/algorithms.py | 76 +++---- neps/optimizers/bayesian_optimization.py | 6 +- neps/optimizers/bracket_optimizer.py | 6 +- neps/optimizers/ifbo.py | 6 +- neps/optimizers/neps_bracket_optimizer.py | 4 +- neps/optimizers/neps_priorband.py | 4 +- neps/optimizers/neps_random_search.py | 6 +- neps/optimizers/random_search.py | 6 +- neps/space/neps_spaces/neps_space.py | 14 +- neps/space/neps_spaces/parameters.py | 8 +- neps/space/neps_spaces/sampling.py | 4 +- neps/space/parsing.py | 8 +- neps/status/status.py | 6 +- neps_examples/basic_usage/hyperparameters.py | 16 +- neps_examples/basic_usage/priors_test.ipynb | 186 ++++++++++++++++ .../basic_usage/pytorch_nn_example.py | 45 +++- .../convenience/logging_additional_info.py | 4 +- .../convenience/neps_tblogger_tutorial.py | 6 +- .../convenience/running_on_slurm_scripts.py | 4 +- .../working_directory_per_pipeline.py | 4 +- .../expert_priors_for_hyperparameters.py | 4 +- neps_examples/efficiency/multi_fidelity.py | 4 +- .../multi_fidelity_and_expert_priors.py | 4 +- .../efficiency/pytorch_lightning_ddp.py | 4 +- .../efficiency/pytorch_lightning_fsdp.py | 4 +- .../efficiency/pytorch_native_ddp.py | 4 +- .../efficiency/pytorch_native_fsdp.py | 4 +- neps_examples/efficiency/warmstarting.py | 74 ++++++- neps_examples/experimental/freeze_thaw.py | 4 +- .../test_neps_space/test_neps_integration.py | 10 +- ...st_neps_integration_priorband__max_cost.py | 4 +- ...t_neps_integration_priorband__max_evals.py | 4 +- .../test_search_space__fidelity.py | 4 +- .../test_search_space__grammar_like.py | 11 +- .../test_search_space__hnas_like.py | 4 +- .../test_search_space__nos_like.py | 4 +- 
.../test_search_space__recursion.py | 4 +- .../test_search_space__resampled.py | 8 +- .../test_search_space__reuse_arch_elements.py | 12 +- tests/test_neps_space/utils.py | 4 +- tests/test_state/test_neps_state.py | 38 ++-- 50 files changed, 615 insertions(+), 291 deletions(-) create mode 100644 neps_examples/basic_usage/priors_test.ipynb diff --git a/docs/getting_started.md b/docs/getting_started.md index c9e763f2b..d553768d8 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -16,7 +16,7 @@ pip install neural-pipeline-search 1. **Establish a [`pipeline_space=`](reference/neps_spaces.md)**: ```python -class PipelineSpace(neps.Pipeline): +class ExampleSpace(neps.PipelineSpace): # Define the parameters of your search space some_parameter = neps.Float(min_value=0.0, max_value=1.0) # float another_parameter = neps.Integer(min_value=0, max_value=10) # integer @@ -41,7 +41,7 @@ def evaluate_pipeline(some_parameter: float, 3. **Execute with [`neps.run()`](reference/neps_run.md)**: ```python -neps.run(evaluate_pipeline, PipelineSpace()) +neps.run(evaluate_pipeline, ExampleSpace()) ``` --- diff --git a/docs/index.md b/docs/index.md index 4ed7f5618..22ebb7b73 100644 --- a/docs/index.md +++ b/docs/index.md @@ -69,7 +69,7 @@ def evaluate_pipeline(hyperparameter_a: float, hyperparameter_b: int, architectu # 2. 
Define a search space of parameters; use the same parameter names as in evaluate_pipeline -class PipelineSpace(neps.Pipeline): +class ExampleSpace(neps.PipelineSpace): hyperparameter_a = neps.Float(min_value=0.001, max_value=0.1, log=True) # Log scale parameter hyperparameter_b = neps.Integer(min_value=1, max_value=42) architecture_parameter = neps.Categorical(choices=("option_a", "option_b")) @@ -78,7 +78,7 @@ class PipelineSpace(neps.Pipeline): logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=ExampleSpace(), root_directory="path/to/save/results", # Replace with the actual path. max_evaluations_total=100, ) diff --git a/docs/reference/neps_run.md b/docs/reference/neps_run.md index 3fe30a814..d579a7c18 100644 --- a/docs/reference/neps_run.md +++ b/docs/reference/neps_run.md @@ -17,15 +17,15 @@ import neps def evaluate_pipeline(learning_rate: float, epochs: int) -> float: # Your code here - return loss +class ExamplePipeline(neps.PipelineSpace): + learning_rate = neps.Float(1e-3, 1e-1, log=True) + epochs = neps.Fidelity(neps.Integer(10, 100)) + neps.run( evaluate_pipeline=evaluate_pipeline, # (1)! - pipeline_space={, # (2)! - "learning_rate": neps.Float(1e-3, 1e-1, log=True), - "epochs": neps.Integer(10, 100) - }, + pipeline_space=ExamplePipeline(), # (2)! root_directory="path/to/result_dir" # (3)! ) ``` @@ -33,7 +33,7 @@ neps.run( 1. The objective function, targeted by NePS for minimization, by evaluation various configurations. It requires these configurations as input and should return either a dictionary or a sole loss value as the output. 2. This defines the search space for the configurations from which the optimizer samples. - It accepts either a dictionary with the configuration names as keys, a path to a YAML configuration file, or a [`configSpace.ConfigurationSpace`](https://automl.github.io/ConfigSpace/) object. 
+ It accepts a class instance inheriting from `neps.PipelineSpace` or a [`configSpace.ConfigurationSpace`](https://automl.github.io/ConfigSpace/) object. For comprehensive information and examples, please refer to the detailed guide available [here](../reference/neps_spaces.md) 3. The directory path where the information about the optimization and its progress gets stored. This is also used to synchronize multiple calls to `neps.run()` for parallelization. diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 41787291d..3ca6c4b90 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -1,132 +1,107 @@ -# NePS Spaces: Joint Architecture and Hyperparameter Search +# NePS Spaces -NePS Spaces provides a powerful framework for defining and optimizing complex search spaces, enabling both and joint architecture and hyperparameter search (JAHS). +**NePS Spaces** provide a powerful framework for defining and optimizing complex search spaces across the entire pipeline, including [hyperparameters](#1-constructing-hyperparameter-spaces), [architecture search](#3-architectures) and [more](#4-general-structures). -## Constructing NePS Spaces +## 1. 
Constructing Hyperparameter Spaces -**NePS spaces** include all the necessary components to define a [Hyperparameter Optimization (HPO) search space](#hpo-search-spaces) like: +**NePS spaces** include all the necessary components to define a Hyperparameter Optimization (HPO) search space like: -- [`Integer`][neps.space.neps_spaces.parameters.Integer]: Discrete integer values -- [`Float`][neps.space.neps_spaces.parameters.Float]: Continuous float values -- [`Categorical`][neps.space.neps_spaces.parameters.Categorical]: Discrete categorical values -- [`Fidelity`][neps.space.neps_spaces.parameters.Fidelity]: Special type for float or integer, [multi-fidelity](../reference/search_algorithms/multifidelity.md) parameters (e.g., epochs, dataset size) +- [`neps.Integer`][neps.space.neps_spaces.parameters.Integer]: Discrete integer values +- [`neps.Float`][neps.space.neps_spaces.parameters.Float]: Continuous float values +- [`neps.Categorical`][neps.space.neps_spaces.parameters.Categorical]: Discrete categorical values +- [`neps.Fidelity`][neps.space.neps_spaces.parameters.Fidelity]: Special type for float or integer, [multi-fidelity](../reference/search_algorithms/multifidelity.md) parameters (e.g., epochs, dataset size) Using these types, you can define the parameters that NePS will optimize during the search process. -Additionally, **NePS spaces** can describe [complex (hierarchical) architectures](#hierarchies-and-architectures) using: - -- [`Operation`][neps.space.neps_spaces.parameters.Operation]: Define operations (e.g., convolution, pooling, activation) with arguments -- [`Resampled`][neps.space.neps_spaces.parameters.Resampled]: Resample other parameters - -### HPO Search Spaces - -A **NePS space** is defined as a subclass of [`Pipeline`][neps.space.neps_spaces.parameters.Pipeline]: +A **NePS space** is defined as a subclass of [`PipelineSpace`][neps.space.neps_spaces.parameters.PipelineSpace]. 
Here we define the hyperparameters that make up the space, like so: ```python -from neps import Pipeline, Float, Integer, Categorical, Fidelity, Resampled, Operation +import neps -class pipeline_space(Pipeline): +class MySpace(neps.PipelineSpace): + float_param = neps.Float(min_value=0.1, max_value=1.0) + int_param = neps.Integer(min_value=1, max_value=10) + cat_param = neps.Categorical(choices=("A", "B", "C")) ``` -Here we define the hyperparameters that make up the space, like so: +!!! tip "**[Fidelity](../reference/search_algorithms/landing_page_algo.md#what-is-multi-fidelity-optimization) Parameters**" -```python + Passing a [`neps.Integer`][neps.space.neps_spaces.parameters.Integer] or [`neps.Float`][neps.space.neps_spaces.parameters.Float] to a [`neps.Fidelity`][neps.space.neps_spaces.parameters.Fidelity] allows you to employ multi-fidelity optimization strategies, which can significantly speed up the optimization process by evaluating configurations at different fidelities (e.g., training for fewer epochs): - float_param = Float(min_value=0.1, max_value=1.0) - int_param = Integer(min_value=1, max_value=10) - cat_param = Categorical(choices=("A", "B", "C")) - epochs = Fidelity(Integer(1, 16)) -``` + ```python + epochs = neps.Fidelity(neps.Integer(1, 16)) + ``` -!!! tip "**Using your knowledge, providing a Prior**" + For more details on how to use fidelity parameters, see the [Multi-Fidelity](../reference/search_algorithms/landing_page_algo.md#what-is-multi-fidelity-optimization) section. + +!!! tip "**Using your knowledge, providing a [Prior](../reference/search_algorithms/landing_page_algo.md#what-are-priors)**" You can provide **your knowledge about where a good value for this parameter lies** by indicating a `prior=`. 
You can also specify a `prior_confidence=` to indicate how strongly you want NePS to focus on these, one of either `"low"`, `"medium"`, or `"high"`: ```python - # Categorical parameters can also choose between other parameters - # Here the float parameter (index 0) is used as a prior - float_or_int = Categorical(choices=(float_param, int_param), prior=0, prior_confidence="high") + # Here "A" is used as a prior, indicated by its index 0 + cat_with_prior = neps.Categorical(choices=("A", "B", "C"), prior=0, prior_confidence="high") ``` -### Hierarchies and Architectures + For more details on how to use priors, see the [Priors](../reference/search_algorithms/landing_page_algo.md#what-are-priors) section. -[Resampling][neps.space.neps_spaces.parameters.Resampled] and [operations][neps.space.neps_spaces.parameters.Operation] allow you to define complex architectures akin to [Context-Free Grammars (CFGs)](https://en.wikipedia.org/wiki/Context-free_grammar). +## 2. Using **NePS Spaces** -With `Resampled` you can reuse parameters in for other parameters, even themselves recursively: +To search a **NePS space**, pass it as the `pipeline_space` argument to the `neps.run()` function: ```python - # The new parameter will have the same range but will be resampled - # independently, so it can take different values than its source - resampled_float = Resampled(source=float_param) - - # If you only use a parameter to resample from it later, prefix it with an underscore - # This way, your evaluation function will not receive it as an argument - _float = Float(min_value=1, max_value=3) - resampled_float_2 = Resampled(source=_float) +neps.run( + ..., + pipeline_space=MySpace() +) ``` -??? info "Self- and future references" +For more details on how to use the `neps.run()` function, see the [NePS Run Reference](../reference/neps_run.md). - When referencing itself or a not yet defined parameter use a string of that parameters name: +## 3. 
Architectures - ```python - self_reference = Categorical(choices=(Resampled("self_reference"), Resampled("next_param"))) - next_param = Float(min_value=0, max_value=5) - ``` +Additionally, **NePS spaces** can describe **complex (hierarchical) architectures** using: -Operations can be Callables, (e.g. pytorch objects) whose arguments can themselves be parameters: +- [`Operation`][neps.space.neps_spaces.parameters.Operation]: Define operations and their arguments + +Operations can be Callables, (e.g. pytorch objects) which will be passed to the evaluation function as such: ```python -from torch.nn import Sequential, Conv2d, ReLU +import torch.nn -class NN_Space(Pipeline): +class NNSpace(PipelineSpace): - # Define an operation for a ReLU activation - _relu = Operation(operator=ReLU) + # Defining operations for different activation functions + _relu = Operation(operator=torch.nn.ReLU) + _sigmoid = Operation(operator=torch.nn.Sigmoid) - # Define a convolution operation with an optimizable kernel size parameter - _convolution = Operation( - operator=Conv2d, - kwargs={"kernel_size": Integer(min_value=1, max_value=10)} - # You could also define _kernel_size separately and use Resampled - ) + # We can then search over these operations and use them in the evaluation function + activation_function = neps.Categorical(choices=(_relu, _sigmoid)) +``` - _model_args = Categorical( - choices=( - # The Sequential will either get a convolution followed by a ReLU - (Resampled(_convolution), _relu,), - # Or two (different, hence the resampling) convolutions - (Resampled(_convolution), Resampled(_convolution)), - # Or just a ReLU activation - (_relu,), - ) - ) +!!! 
info "Intermediate parameters" - # Define a sequential operation, using the previously defined _model_args - # This model will be the only parameter passed to the evaluation function - model = Operation( - operator=Sequential, - args=_model_args - ) -``` + When defining parameters that should not be passed to the evaluation function and instead are used in other parameters, prefix them with an underscore, like here in `_layer_size`. Otherwise this might lead to `unexpected arguments` errors. -??? warning "Tuples as choice" +Operation also allow for (keyword-)arguments to be defined, including other parameters of the space: - When using a tuple as one of the choices in a `Categorical`, all choices must be tuples, as in the example above with ```(_relu,)```. +```python -## Using NePS Spaces + _layer_size = neps.Integer(min_value=80, max_value=100) -To use a NePS space, pass it as the `pipeline_space` argument to the `neps.run()` function: + hidden_layer = neps.Operation( + operator=torch.nn.Linear, + kwargs={"input_size": 64, # Fixed input size + "output_size": _layer_size}, # Using the previously defined parameter -```python -import neps -neps.run( - ..., - pipeline_space=NN_Space() -) + # Or for non-keyword arguments: + args=(activation_function,) + ) ``` -!!! abstract "NePS Space-compatible optimizers" +This can be used for efficient architecture search by defining cells and blocks of operations, that make up a neural network. + +??? 
abstract "Structural Space-compatible optimizers" Currently, NePS Spaces is compatible with these optimizers, which can be imported from [neps.algorithms][neps.optimizers.algorithms--neps-algorithms]: @@ -134,9 +109,68 @@ neps.run( - [`Complex Random Search`][neps.optimizers.algorithms.complex_random_search], which can sample the space uniformly at random, using priors and mutating previously sampled configurations - [`PriorBand`][neps.optimizers.algorithms.priorband], which uses [multi-fidelity](./search_algorithms/multifidelity.md) and the prior knowledge encoded in the NePS space +## 4. General Structures + +Until now all parameters are sampled once and their value used for all occurrences. This section describes how to resample parameters in different contexts using: + +- [`neps.Resampled`][neps.space.neps_spaces.parameters.Resampled]: Resample from an existing parameters range + +With `neps.Resampled` you can reuse a parameter, even themselves recursively, but with a new value each time: + +```python + float_param = neps.Float(min_value=0, max_value=1) + + # The resampled parameter will have the same range but will be sampled + # independently, so it can take a different value than its source + resampled_float = neps.Resampled(source=float_param) +``` + +This is especially useful for defining complex architectures, where e.g. a cell block is defined and then resampled multiple times to create a neural network architecture: + +```python + + _kernel_size = neps.Integer(min_value=5, max_value=8) + + # Define a cell block that can be resampled + # It will resample a new kernel size from _kernel_size each time + _cell_block = neps.Operation( + operator=torch.nn.Conv2d, + kwargs={"kernel_size": neps.Resampled(source=_kernel_size)} + ) + + # Resample the cell block multiple times to create a convolutional neural network + cnn = torch.nn.Sequential( + neps.Resampled(_cell_block), + neps.Resampled(_cell_block), + neps.Resampled(_cell_block), + ) +``` + +??? 
info "Self- and future references" + + When referencing itself or a not yet defined parameter (to enable recursions) use a string of that parameters name: + + ```python + self_reference = Categorical( + choices=( + # It will either choose to resample itself twice + (Resampled("self_reference"), Resampled("self_reference")), + # Or it will sample the future parameter + (Resampled("future_param"),), + ) + ) + # This results in a (possibly infinite) tuple of independently sampled future_params + + future_param = Float(min_value=0, max_value=5) + ``` + +!!! tip "Complex structural spaces" + + Together, [Resampling][neps.space.neps_spaces.parameters.Resampled] and [operations][neps.space.neps_spaces.parameters.Operation] allow you to define complex search spaces across the whole ML-pipeline akin to [Context-Free Grammars (CFGs)](https://en.wikipedia.org/wiki/Context-free_grammar), exceeding architecture search. For example, you can sample neural optimizers from a set of instructions, as done in [`NOSBench`](https://openreview.net/pdf?id=5Lm2ghxMlp) to train models. + ## Inspecting Configurations -NePS saves the configurations as paths, where each sampling decision is recorded. As they are hard to read, so you can load the configuration from the `results` directory using the [`NepsCompatConverter`][neps.space.neps_spaces.neps_space.NepsCompatConverter] class, which converts the configuration such that it can be used with the NePS Spaces API: +NePS saves the configurations as paths, where each sampling decision is recorded. 
As they are hard to read, so you can load the configuration from the `results/.../configs` directory using the [`NepsCompatConverter`][neps.space.neps_spaces.neps_space.NepsCompatConverter] class, which converts the configuration such that it can be used with the NePS Spaces API: ```python from neps.space.neps_spaces import neps_space @@ -144,7 +178,7 @@ import yaml with open("Path/to/config.yaml", "r") as f: conf_dict = yaml.safe_load(f) -resolution_context = NepsCompatConverter.from_neps_config(conf_dict) +config = NepsCompatConverter.from_neps_config(conf_dict) # Use the resolution context to sample the configuration using a # Sampler that follows the instructions in the configuration diff --git a/docs/reference/optimizers.md b/docs/reference/optimizers.md index 37667fb44..ef62a8803 100644 --- a/docs/reference/optimizers.md +++ b/docs/reference/optimizers.md @@ -42,7 +42,7 @@ NePS provides a multitude of optimizers from the literature, the [algorithms](.. ✅ = supported/necessary, ❌ = not supported, ✔️* = optional, click for details, ✖️\* ignorable, click for details -| Algorithm | [Multi-Fidelity](../reference/search_algorithms/multifidelity.md) | [Priors](../reference/search_algorithms/prior.md) | Model-based | [NePS-ready](../reference/neps_spaces.md#hierarchies-and-architectures) | +| Algorithm | [Multi-Fidelity](../reference/search_algorithms/multifidelity.md) | [Priors](../reference/search_algorithms/prior.md) | Model-based | [NePS-ready](../reference/neps_spaces.md#3-architectures) | | :- | :------------: | :----: | :---------: | :-----------------: | | `Grid Search`|[️️✖️*][neps.optimizers.algorithms.grid_search]|❌|❌|❌| | `Random Search`|[️️✖️*][neps.optimizers.algorithms.random_search]|[✔️*][neps.optimizers.algorithms.random_search]|❌|✅| diff --git a/docs/reference/search_algorithms/landing_page_algo.md b/docs/reference/search_algorithms/landing_page_algo.md index 0a91a2275..5a7d5adb1 100644 --- a/docs/reference/search_algorithms/landing_page_algo.md +++ 
b/docs/reference/search_algorithms/landing_page_algo.md @@ -6,7 +6,7 @@ We distinguish between algorithms that use different types of information and st ✅ = supported/necessary, ❌ = not supported, ✔️* = optional, click for details, ✖️\* ignorable, click for details -| Algorithm | [Multi-Fidelity](../search_algorithms/multifidelity.md) | [Priors](../search_algorithms/prior.md) | Model-based | [NePS-ready](../neps_spaces.md#hierarchies-and-architectures) | +| Algorithm | [Multi-Fidelity](../search_algorithms/multifidelity.md) | [Priors](../search_algorithms/prior.md) | Model-based | [NePS-ready](../neps_spaces.md#3-architectures) | | :- | :------------: | :----: | :---------: | :-----------------: | | `Grid Search`|[️️✖️*][neps.optimizers.algorithms.grid_search]|❌|❌|❌| | `Random Search`|[️️✖️*][neps.optimizers.algorithms.random_search]|[✔️*][neps.optimizers.algorithms.random_search]|❌|✅| @@ -37,7 +37,7 @@ We present a collection of MF-algorithms [here](./multifidelity.md) and algorith ## What are Priors? -Priors are used when there exists some information about the search space, that can be used to guide the optimization process. This information could come from expert domain knowledge or previous experiments. A Prior is provided in the form of a distribution over one dimension of the search space, with a `mean` (the suspected optimum) and a `confidence level`, or `variance`. We discuss how Priors can be included in your NePS-search space [here](../../reference/neps_spaces.md#hpo-search-spaces). +Priors are used when there exists some information about the search space, that can be used to guide the optimization process. This information could come from expert domain knowledge or previous experiments. A Prior is provided in the form of a distribution over one dimension of the search space, with a `mean` (the suspected optimum) and a `confidence level`, or `variance`. 
We discuss how Priors can be included in your NePS-search space [here](../../reference/neps_spaces.md#1-constructing-hyperparameter-spaces). !!! tip "Advantages of using Priors" diff --git a/neps/__init__.py b/neps/__init__.py index a506703bc..0be1e5a1b 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -19,7 +19,7 @@ Float, Integer, Operation, - Pipeline, + PipelineSpace, Resampled, ) from neps.state import BudgetInfo, Trial @@ -39,7 +39,7 @@ "HPOInteger", "Integer", "Operation", - "Pipeline", + "PipelineSpace", "Resampled", "SampledConfig", "SearchSpace", diff --git a/neps/api.py b/neps/api.py index 7fc0491f6..f63366249 100644 --- a/neps/api.py +++ b/neps/api.py @@ -23,7 +23,7 @@ check_neps_space_compatibility, convert_neps_to_classic_search_space, ) -from neps.space.neps_spaces.parameters import Pipeline +from neps.space.neps_spaces.parameters import PipelineSpace from neps.space.parsing import convert_to_space from neps.state import NePSState, OptimizationState, SeedSnapshot from neps.state.neps_state import TrialRepo @@ -43,7 +43,7 @@ def run( # noqa: PLR0913, C901 evaluate_pipeline: Callable[..., EvaluatePipelineReturn] | str, - pipeline_space: ConfigurationSpace | Pipeline, + pipeline_space: ConfigurationSpace | PipelineSpace, *, root_directory: str | Path = "neps_results", overwrite_working_directory: bool = False, @@ -61,8 +61,8 @@ def run( # noqa: PLR0913, C901 | Mapping[str, Any] | tuple[OptimizerChoice, Mapping[str, Any]] | Callable[Concatenate[SearchSpace, ...], AskFunction] # Hack, while we transit - | Callable[Concatenate[Pipeline, ...], AskFunction] # from SearchSpace to - | Callable[Concatenate[SearchSpace | Pipeline, ...], AskFunction] # Pipeline + | Callable[Concatenate[PipelineSpace, ...], AskFunction] # from SearchSpace to + | Callable[Concatenate[SearchSpace | PipelineSpace, ...], AskFunction] # Pipeline | CustomOptimizer | Literal["auto"] ) = "auto", @@ -96,7 +96,7 @@ def evaluate_pipeline(some_parameter: float) -> float: 
validation_error = -some_parameter return validation_error - PipelineSpace(Pipeline): + MySpace(PipelineSpace): dataset = "mnist" # constant nlayers = neps.Integer(2,10) # integer alpha = neps.Float(0.1, 1.0) # float @@ -118,7 +118,7 @@ def evaluate_pipeline(some_parameter: float) -> float: neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=MySpace(), root_directory="usage_example", max_evaluations_total=5, ) @@ -152,7 +152,7 @@ def evaluate_pipeline(some_parameter: float) -> float: This most direct way to specify the search space is as follows: ```python - PipelineSpace(Pipeline): + MySpace(PipelineSpace): dataset = "mnist" # constant nlayers = neps.Integer(2,10) # integer alpha = neps.Float(0.1, 1.0) # float @@ -173,7 +173,7 @@ def evaluate_pipeline(some_parameter: float) -> float: ) neps.run( - pipeline_space=PipelineSpace() + pipeline_space=MySpace() ) ``` @@ -371,7 +371,7 @@ def __call__( # Optimizer check, if the search space is a Pipeline and the optimizer is not a NEPS # algorithm, we raise an error, as the optimizer is not compatible. if ( - isinstance(pipeline_space, Pipeline) + isinstance(pipeline_space, PipelineSpace) and neps_classic_space_compatibility == "classic" ): raise ValueError( @@ -380,7 +380,7 @@ def __call__( "'priorband', or 'complex_random_search'." 
) - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): assert not isinstance(evaluate_pipeline, str) evaluate_pipeline = adjust_evaluation_pipeline_for_neps_space( evaluate_pipeline, pipeline_space @@ -439,7 +439,7 @@ def __call__( def warmstart_neps( - pipeline_space: Pipeline, + pipeline_space: PipelineSpace, root_directory: Path | str, warmstart_configs: Sequence[ tuple[ @@ -454,8 +454,8 @@ def warmstart_neps( | Mapping[str, Any] | tuple[OptimizerChoice, Mapping[str, Any]] | Callable[Concatenate[SearchSpace, ...], AskFunction] # Hack, while we transit - | Callable[Concatenate[Pipeline, ...], AskFunction] # from SearchSpace to - | Callable[Concatenate[SearchSpace | Pipeline, ...], AskFunction] # Pipeline + | Callable[Concatenate[PipelineSpace, ...], AskFunction] # from SearchSpace to + | Callable[Concatenate[SearchSpace | PipelineSpace, ...], AskFunction] # Pipeline | CustomOptimizer | Literal["auto"] ) = "auto", diff --git a/neps/optimizers/__init__.py b/neps/optimizers/__init__.py index 50afa8e03..e161c1f61 100644 --- a/neps/optimizers/__init__.py +++ b/neps/optimizers/__init__.py @@ -15,12 +15,12 @@ if TYPE_CHECKING: from neps.space import SearchSpace - from neps.space.neps_spaces.parameters import Pipeline + from neps.space.neps_spaces.parameters import PipelineSpace def _load_optimizer_from_string( optimizer: OptimizerChoice | Literal["auto"], - space: SearchSpace | Pipeline, + space: SearchSpace | PipelineSpace, *, optimizer_kwargs: Mapping[str, Any] | None = None, ) -> tuple[AskFunction, OptimizerInfo]: @@ -50,12 +50,12 @@ def load_optimizer( | Mapping[str, Any] | tuple[OptimizerChoice, Mapping[str, Any]] | Callable[Concatenate[SearchSpace, ...], AskFunction] # Hack, while we transit - | Callable[Concatenate[Pipeline, ...], AskFunction] # from SearchSpace to - | Callable[Concatenate[SearchSpace | Pipeline, ...], AskFunction] # Pipeline + | Callable[Concatenate[PipelineSpace, ...], AskFunction] # from SearchSpace to + | 
Callable[Concatenate[SearchSpace | PipelineSpace, ...], AskFunction] # Pipeline | CustomOptimizer | Literal["auto"] ), - space: SearchSpace | Pipeline, + space: SearchSpace | PipelineSpace, ) -> tuple[AskFunction, OptimizerInfo]: match optimizer: # Predefined string (including "auto") diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index e0267a259..7c6f90697 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -50,7 +50,7 @@ Categorical, Float, Integer, - Pipeline, + PipelineSpace, Resolvable, ) @@ -65,7 +65,7 @@ def _bo( # noqa: C901, PLR0912 - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, initial_design_size: int | Literal["ndim"] = "ndim", use_priors: bool, @@ -102,7 +102,7 @@ def _bo( # noqa: C901, PLR0912 ValueError: if initial_design_size < 1 ValueError: if fidelity is not None and ignore_fidelity is False """ - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -159,7 +159,7 @@ def _bo( # noqa: C901, PLR0912 def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], eta: int, @@ -231,7 +231,7 @@ def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 multi_objective: Whether to use multi-objective promotion strategies. Only used in case of multi-objective multi-fidelity algorithms. 
""" - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -408,8 +408,8 @@ def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 ) -def determine_optimizer_automatically(space: SearchSpace | Pipeline) -> str: - if isinstance(space, Pipeline): +def determine_optimizer_automatically(space: SearchSpace | PipelineSpace) -> str: + if isinstance(space, PipelineSpace): if space.fidelity_attrs: return "neps_priorband" return "complex_random_search" @@ -432,7 +432,7 @@ def determine_optimizer_automatically(space: SearchSpace | Pipeline) -> str: def random_search( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, use_priors: bool = False, ignore_fidelity: bool | Literal["highest fidelity"] = False, @@ -448,7 +448,7 @@ def random_search( ignore_fidelity: Whether to ignore fidelity when sampling. In this case, the max fidelity is always used. 
""" - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -511,7 +511,7 @@ def random_search( def grid_search( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, ignore_fidelity: bool = False, ) -> GridSearch: @@ -525,7 +525,7 @@ def grid_search( """ from neps.optimizers.utils.grid import make_grid - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -551,7 +551,7 @@ def grid_search( def ifbo( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, step_size: int | float = 1, use_priors: bool = False, @@ -601,7 +601,7 @@ def ifbo( surrogate_path: Path to the surrogate model to use surrogate_version: Version of the surrogate model to use """ - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -679,7 +679,7 @@ def ifbo( def successive_halving( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, sampler: Literal["uniform", "prior"] = "uniform", eta: int = 3, @@ -747,7 +747,7 @@ def successive_halving( sample_prior_first: Whether to sample the prior configuration first, and if so, should it be at the highest fidelity level. 
""" - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -770,7 +770,7 @@ def successive_halving( def hyperband( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, eta: int = 3, sampler: Literal["uniform", "prior"] = "uniform", @@ -820,7 +820,7 @@ def hyperband( sample_prior_first: Whether to sample the prior configuration first, and if so, should it be at the highest fidelity level. """ - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -843,7 +843,7 @@ def hyperband( def mo_hyperband( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, eta: int = 3, sampler: Literal["uniform", "prior"] = "uniform", @@ -853,7 +853,7 @@ def mo_hyperband( """Multi-objective version of hyperband using the same candidate selection method as MOASHA. """ - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -878,7 +878,7 @@ def mo_hyperband( def asha( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, eta: int = 3, early_stopping_rate: int = 0, @@ -927,7 +927,7 @@ def asha( sample_prior_first: Whether to sample the prior configuration first, and if so, should it be at the highest fidelity. 
""" - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -950,7 +950,7 @@ def asha( def moasha( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, eta: int = 3, early_stopping_rate: int = 0, @@ -958,7 +958,7 @@ def moasha( sample_prior_first: bool | Literal["highest_fidelity"] = False, mo_selector: Literal["nsga2", "epsnet"] = "epsnet", ) -> BracketOptimizer: - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -983,7 +983,7 @@ def moasha( def async_hb( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, eta: int = 3, sampler: Literal["uniform", "prior"] = "uniform", @@ -1029,7 +1029,7 @@ def async_hb( sample_prior_first: Whether to sample the prior configuration first. """ - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -1052,7 +1052,7 @@ def async_hb( def priorband( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, eta: int = 3, sample_prior_first: bool | Literal["highest_fidelity"] = False, @@ -1096,7 +1096,7 @@ def priorband( `N` * `maximum_fidelity` worth of fidelity has been evaluated, proceed with bayesian optimization when sampling a new configuration. 
""" - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -1188,7 +1188,7 @@ def bayesian_optimization( optimization. If `None`, the reference point will be calculated automatically. """ - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -1236,7 +1236,7 @@ def bayesian_optimization( def pibo( - pipeline_space: SearchSpace | Pipeline, + pipeline_space: SearchSpace | PipelineSpace, *, initial_design_size: int | Literal["ndim"] = "ndim", cost_aware: bool | Literal["log"] = False, @@ -1275,7 +1275,7 @@ def pibo( ignore_fidelity: Whether to ignore the fidelity parameter when sampling. In this case, the max fidelity is always used. """ - if isinstance(pipeline_space, Pipeline): + if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space is not None: pipeline_space = converted_space @@ -1319,7 +1319,7 @@ class CustomOptimizer: kwargs: Mapping[str, Any] = field(default_factory=dict) initialized: bool = False - def create(self, space: SearchSpace | Pipeline) -> AskFunction: + def create(self, space: SearchSpace | PipelineSpace) -> AskFunction: assert not self.initialized, "Custom optimizer already initialized." return self.optimizer(space, **self.kwargs) # type: ignore @@ -1348,7 +1348,7 @@ def custom( def complex_random_search( - pipeline_space: Pipeline, + pipeline_space: PipelineSpace, *, ignore_fidelity: bool | Literal["highest fidelity"] = False, ) -> NePSComplexRandomSearch: @@ -1356,7 +1356,7 @@ def complex_random_search( but allows for more complex sampling strategies. Args: - pipeline: The search space to sample from. 
+ pipeline_space: The search space to sample from. ignore_fidelity: Whether to ignore the fidelity parameter when sampling. If `True`, the algorithm will sample the fidelity like a normal parameter. If set to `"highest fidelity"`, it will always sample at the highest fidelity. @@ -1384,7 +1384,7 @@ def complex_random_search( def neps_random_search( - pipeline_space: Pipeline, + pipeline_space: PipelineSpace, *, use_priors: bool = False, ignore_fidelity: bool | Literal["highest fidelity"] = False, @@ -1437,7 +1437,7 @@ def neps_random_search( def _neps_bracket_optimizer( - pipeline_space: Pipeline, + pipeline_space: PipelineSpace, *, bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], eta: int, @@ -1542,7 +1542,7 @@ def _neps_bracket_optimizer( def neps_priorband( - pipeline_space: Pipeline, + pipeline_space: PipelineSpace, *, eta: int = 3, sample_prior_first: bool | Literal["highest_fidelity"] = False, @@ -1593,8 +1593,8 @@ def neps_priorband( PredefinedOptimizers: Mapping[ str, Callable[Concatenate[SearchSpace, ...], AskFunction] - | Callable[Concatenate[Pipeline, ...], AskFunction] - | Callable[Concatenate[SearchSpace, Pipeline, ...], AskFunction], + | Callable[Concatenate[PipelineSpace, ...], AskFunction] + | Callable[Concatenate[SearchSpace, PipelineSpace, ...], AskFunction], ] = { f.__name__: f for f in ( diff --git a/neps/optimizers/bayesian_optimization.py b/neps/optimizers/bayesian_optimization.py index 2784fcc34..cb302a9de 100644 --- a/neps/optimizers/bayesian_optimization.py +++ b/neps/optimizers/bayesian_optimization.py @@ -23,7 +23,7 @@ from neps.optimizers.optimizer import SampledConfig from neps.optimizers.utils.initial_design import make_initial_design from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space -from neps.space.neps_spaces.parameters import Pipeline +from neps.space.neps_spaces.parameters import PipelineSpace if TYPE_CHECKING: from neps.sampling import Prior @@ -65,7 +65,7 @@ def 
_pibo_exp_term( class BayesianOptimization: """Uses `botorch` as an engine for doing bayesian optimiziation.""" - space: SearchSpace | Pipeline + space: SearchSpace | PipelineSpace """The search space to use.""" encoder: ConfigEncoder @@ -95,7 +95,7 @@ def __call__( # noqa: C901, PLR0912, PLR0915 # noqa: C901, PLR0912 budget_info: BudgetInfo | None = None, n: int | None = None, ) -> SampledConfig | list[SampledConfig]: - if isinstance(self.space, Pipeline): + if isinstance(self.space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(self.space) if converted_space is not None: self.space = converted_space diff --git a/neps/optimizers/bracket_optimizer.py b/neps/optimizers/bracket_optimizer.py index 74dafeace..f2509f490 100644 --- a/neps/optimizers/bracket_optimizer.py +++ b/neps/optimizers/bracket_optimizer.py @@ -22,7 +22,7 @@ from neps.optimizers.utils.brackets import PromoteAction, SampleAction from neps.sampling.samplers import Sampler from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space -from neps.space.neps_spaces.parameters import Pipeline +from neps.space.neps_spaces.parameters import PipelineSpace from neps.utils.common import disable_warnings if TYPE_CHECKING: @@ -214,7 +214,7 @@ class BracketOptimizer: `"successive_halving"`, `"asha"`, `"hyperband"`, etc. 
""" - space: SearchSpace | Pipeline + space: SearchSpace | PipelineSpace """The pipeline space to optimize over.""" encoder: ConfigEncoder @@ -259,7 +259,7 @@ def __call__( # noqa: C901, PLR0912, PLR0915 budget_info: BudgetInfo | None, n: int | None = None, ) -> SampledConfig | list[SampledConfig]: - if isinstance(self.space, Pipeline): + if isinstance(self.space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(self.space) if converted_space is not None: self.space = converted_space diff --git a/neps/optimizers/ifbo.py b/neps/optimizers/ifbo.py index 13c2ed426..42ab65b52 100755 --- a/neps/optimizers/ifbo.py +++ b/neps/optimizers/ifbo.py @@ -18,7 +18,7 @@ from neps.sampling import Prior, Sampler from neps.space import ConfigEncoder, Domain, HPOFloat, HPOInteger, SearchSpace from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space -from neps.space.neps_spaces.parameters import Pipeline +from neps.space.neps_spaces.parameters import PipelineSpace if TYPE_CHECKING: from neps.state import BudgetInfo, Trial @@ -104,7 +104,7 @@ class IFBO: * Github: https://github.com/automl/ifBO/tree/main """ - space: SearchSpace | Pipeline + space: SearchSpace | PipelineSpace """The entire search space for the pipeline.""" encoder: ConfigEncoder @@ -137,7 +137,7 @@ def __call__( budget_info: BudgetInfo | None = None, n: int | None = None, ) -> SampledConfig | list[SampledConfig]: - if isinstance(self.space, Pipeline): + if isinstance(self.space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(self.space) if converted_space is not None: self.space = converted_space diff --git a/neps/optimizers/neps_bracket_optimizer.py b/neps/optimizers/neps_bracket_optimizer.py index 46d3cc9d5..ec5fc6347 100644 --- a/neps/optimizers/neps_bracket_optimizer.py +++ b/neps/optimizers/neps_bracket_optimizer.py @@ -27,7 +27,7 @@ if TYPE_CHECKING: from neps.optimizers.neps_priorband import NePSPriorBandSampler from 
neps.optimizers.utils.brackets import Bracket - from neps.space.neps_spaces.parameters import Pipeline + from neps.space.neps_spaces.parameters import PipelineSpace from neps.state.optimizer import BudgetInfo from neps.state.trial import Trial @@ -39,7 +39,7 @@ class _NePSBracketOptimizer: """The pipeline space to optimize over.""" - space: Pipeline + space: PipelineSpace """Whether or not to sample the prior first. diff --git a/neps/optimizers/neps_priorband.py b/neps/optimizers/neps_priorband.py index 5d82a9432..2059d053a 100644 --- a/neps/optimizers/neps_priorband.py +++ b/neps/optimizers/neps_priorband.py @@ -20,14 +20,14 @@ if TYPE_CHECKING: import pandas as pd - from neps.space.neps_spaces.parameters import Pipeline + from neps.space.neps_spaces.parameters import PipelineSpace @dataclass class NePSPriorBandSampler: """Implement a sampler based on PriorBand.""" - space: Pipeline + space: PipelineSpace """The pipeline space to optimize over.""" eta: int diff --git a/neps/optimizers/neps_random_search.py b/neps/optimizers/neps_random_search.py index 03b1a729f..db0678659 100644 --- a/neps/optimizers/neps_random_search.py +++ b/neps/optimizers/neps_random_search.py @@ -25,7 +25,7 @@ import neps.state.optimizer as optimizer_state import neps.state.trial as trial_state from neps.optimizers import optimizer - from neps.space.neps_spaces.parameters import Pipeline + from neps.space.neps_spaces.parameters import PipelineSpace from neps.state.trial import Trial @@ -43,7 +43,7 @@ class NePSRandomSearch: def __init__( self, - pipeline: Pipeline, + pipeline: PipelineSpace, use_priors: bool = False, # noqa: FBT001, FBT002 ignore_fidelity: bool | Literal["highest fidelity"] = False, # noqa: FBT002 ): @@ -151,7 +151,7 @@ class NePSComplexRandomSearch: def __init__( self, - pipeline: Pipeline, + pipeline: PipelineSpace, ignore_fidelity: bool | Literal["highest fidelity"] = False, # noqa: FBT002 ): """Initialize the ComplexRandomSearch optimizer with a pipeline. 
diff --git a/neps/optimizers/random_search.py b/neps/optimizers/random_search.py index 6d1f0cfa0..6afb214e1 100644 --- a/neps/optimizers/random_search.py +++ b/neps/optimizers/random_search.py @@ -6,7 +6,7 @@ from neps.optimizers.optimizer import SampledConfig from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space -from neps.space.neps_spaces.parameters import Pipeline +from neps.space.neps_spaces.parameters import PipelineSpace if TYPE_CHECKING: from neps.sampling import Sampler @@ -18,7 +18,7 @@ class RandomSearch: """A simple random search optimizer.""" - space: SearchSpace | Pipeline + space: SearchSpace | PipelineSpace encoder: ConfigEncoder sampler: Sampler @@ -28,7 +28,7 @@ def __call__( budget_info: BudgetInfo | None, n: int | None = None, ) -> SampledConfig | list[SampledConfig]: - if isinstance(self.space, Pipeline): + if isinstance(self.space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(self.space) if converted_space is not None: self.space = converted_space diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index d185cbf05..b91120271 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -21,7 +21,7 @@ Float, Integer, Operation, - Pipeline, + PipelineSpace, Resampled, Resolvable, ) @@ -35,7 +35,7 @@ if TYPE_CHECKING: from neps.space import SearchSpace -P = TypeVar("P", bound="Pipeline") +P = TypeVar("P", bound="PipelineSpace") class SamplingResolutionContext: @@ -353,7 +353,7 @@ def _resolver_dispatch( @_resolver_dispatch.register def _( self, - pipeline_obj: Pipeline, + pipeline_obj: PipelineSpace, context: SamplingResolutionContext, ) -> Any: if context.was_already_resolved(pipeline_obj): @@ -829,7 +829,7 @@ def from_neps_config( def _prepare_sampled_configs( - chosen_pipelines: list[tuple[Pipeline, SamplingResolutionContext]], + chosen_pipelines: list[tuple[PipelineSpace, SamplingResolutionContext]], n_prev_trials: 
int, return_single: bool, # noqa: FBT001 ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: @@ -912,7 +912,7 @@ def inner(*args: Any, **kwargs: Any) -> Any: return inner -def convert_neps_to_classic_search_space(space: Pipeline) -> SearchSpace | None: +def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | None: """Convert a NePS space to a classic SearchSpace if possible. This function checks if the NePS space can be converted to a classic SearchSpace by ensuring that it does not contain any complex types like Operation or Resampled, @@ -1000,10 +1000,10 @@ def check_neps_space_compatibility( Concatenate[SearchSpace, ...], optimizer.AskFunction ] # Hack, while we transit | Callable[ - Concatenate[Pipeline, ...], optimizer.AskFunction + Concatenate[PipelineSpace, ...], optimizer.AskFunction ] # from SearchSpace to | Callable[ - Concatenate[SearchSpace | Pipeline, ...], optimizer.AskFunction + Concatenate[SearchSpace | PipelineSpace, ...], optimizer.AskFunction ] # Pipeline | algorithms.CustomOptimizer | Literal["auto"] diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index a83b25915..974d2ea08 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -131,7 +131,7 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: # noqa: ARG002 raise ValueError("For a Fidelity object there is nothing to resolve.") -class Pipeline(Resolvable): +class PipelineSpace(Resolvable): """A class representing a pipeline in NePS spaces.""" @property @@ -170,7 +170,7 @@ def get_attrs(self) -> Mapping[str, Any]: return attrs - def from_attrs(self, attrs: Mapping[str, Any]) -> Pipeline: + def from_attrs(self, attrs: Mapping[str, Any]) -> PipelineSpace: """Create a new Pipeline instance from the given attributes. 
Args: @@ -183,7 +183,7 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Pipeline: Raises: ValueError: If the attributes do not match the pipeline's expected structure. """ - new_pipeline = Pipeline() + new_pipeline = PipelineSpace() for name, value in attrs.items(): setattr(new_pipeline, name, value) return new_pipeline @@ -758,7 +758,7 @@ def __init__( min_value: int, max_value: int, log: bool = False, # noqa: FBT001, FBT002 - prior: int | _Unset = _UNSET, + prior: float | int | _Unset = _UNSET, prior_confidence: ( Literal["low", "medium", "high"] | ConfidenceLevel | _Unset ) = _UNSET, diff --git a/neps/space/neps_spaces/sampling.py b/neps/space/neps_spaces/sampling.py index 6b4df10e0..b67465c99 100644 --- a/neps/space/neps_spaces/sampling.py +++ b/neps/space/neps_spaces/sampling.py @@ -17,11 +17,11 @@ Domain, Float, Integer, - Pipeline, + PipelineSpace, ) T = TypeVar("T") -P = TypeVar("P", bound="Pipeline") +P = TypeVar("P", bound="PipelineSpace") @runtime_checkable diff --git a/neps/space/parsing.py b/neps/space/parsing.py index 5014f5880..186421f78 100644 --- a/neps/space/parsing.py +++ b/neps/space/parsing.py @@ -9,7 +9,7 @@ from collections.abc import Mapping, Sequence from typing import TYPE_CHECKING, Any, TypeAlias -from neps.space.neps_spaces.parameters import Pipeline +from neps.space.neps_spaces.parameters import PipelineSpace from neps.space.parameters import ( HPOCategorical, HPOConstant, @@ -304,9 +304,9 @@ def convert_to_space( Mapping[str, dict | str | int | float | Parameter] | SearchSpace | ConfigurationSpace - | Pipeline + | PipelineSpace ), -) -> SearchSpace | Pipeline: +) -> SearchSpace | PipelineSpace: """Converts a search space to a SearchSpace object. 
Args: @@ -329,7 +329,7 @@ def convert_to_space( return space case Mapping(): return convert_mapping(space) - case Pipeline(): + case PipelineSpace(): return space case _: raise ValueError( diff --git a/neps/status/status.py b/neps/status/status.py index 6366bc5b8..7280faf68 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -20,7 +20,7 @@ from neps.state.trial import State, Trial if TYPE_CHECKING: - from neps.space.neps_spaces.parameters import Pipeline + from neps.space.neps_spaces.parameters import PipelineSpace @dataclass @@ -107,7 +107,7 @@ def num_pending(self) -> int: return len(self.by_state[State.PENDING]) def formatted( # noqa: PLR0912, C901 - self, pipeline_space_variables: tuple[Pipeline, list[str]] | None = None + self, pipeline_space_variables: tuple[PipelineSpace, list[str]] | None = None ) -> str: """Return a formatted string of the summary. @@ -258,7 +258,7 @@ def status( root_directory: str | Path, *, print_summary: bool = False, - pipeline_space_variables: tuple[Pipeline, list[str]] | None = None, + pipeline_space_variables: tuple[PipelineSpace, list[str]] | None = None, ) -> tuple[pd.DataFrame, pd.Series]: """Print status information of a neps run and return results. 
diff --git a/neps_examples/basic_usage/hyperparameters.py b/neps_examples/basic_usage/hyperparameters.py index 348ff6362..8b742aa6b 100644 --- a/neps_examples/basic_usage/hyperparameters.py +++ b/neps_examples/basic_usage/hyperparameters.py @@ -2,6 +2,7 @@ import numpy as np import neps + def evaluate_pipeline(float1, float2, categorical, integer1, integer2): objective_to_minimize = -float( np.sum([float1, float2, int(categorical), integer1, integer2]) @@ -9,17 +10,18 @@ def evaluate_pipeline(float1, float2, categorical, integer1, integer2): return objective_to_minimize -class PipelineSpace(neps.Pipeline): - float1=neps.Float(min_value=0, max_value=1) - float2=neps.Float(min_value=-10, max_value=10) - categorical=neps.Categorical(choices=(0, 1)) - integer1=neps.Integer(min_value=0, max_value=1) - integer2=neps.Integer(min_value=1, max_value=1000, log=True) +class HPOSpace(neps.PipelineSpace): + float1 = neps.Float(min_value=0, max_value=1) + float2 = neps.Float(min_value=-10, max_value=10) + categorical = neps.Categorical(choices=(0, 1)) + integer1 = neps.Integer(min_value=0, max_value=1) + integer2 = neps.Integer(min_value=1, max_value=1000, log=True) + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), root_directory="results/hyperparameters_example", post_run_summary=True, max_evaluations_total=30, diff --git a/neps_examples/basic_usage/priors_test.ipynb b/neps_examples/basic_usage/priors_test.ipynb new file mode 100644 index 000000000..e3ab00a1d --- /dev/null +++ b/neps_examples/basic_usage/priors_test.ipynb @@ -0,0 +1,186 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "180fcb7f", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA/wAAAPxCAYAAABHP6YlAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAA7QdJREFUeJzs3QmczfX++PH3MBjbjKXMkCFF9i3ElMqWCcnW/d26YpSIcENRupaQSGXN0iLDRaKLImvWWw0GiSghN8pWiQkZy5z/4/25/++554zZnTPnnO95PR+PrzPnfL/zPd/v9xzz/r4/a4jD4XAIAAAAAACwlTy+PgAAAAAAAOB5JPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA2R8AMAAAAAYEMk/AAAAAAA2BAJPwAAAAAANkTCDwAAAACADZHwAwAAAABgQyT8CFovv/yyhISE5Mp7NWnSxCyWTZs2mff+6KOPcuX9u3XrJrfeeqv4s/Pnz8tTTz0lUVFR5tr0798/3W31XPScAADICLHevxDrgdxHwg9biI+PN4HDWsLCwqRMmTISGxsrU6ZMkT/++MMj73P8+HFz87B7927xN/58bFnx6quvms+xd+/e8s9//lO6dOnilfeZPn26eR/8z4cffiiPP/64VKpUyfz/cb1hBQB/Qaz372PLCmK9b/z222/y+uuvy3333Sc333yzFCtWTBo1amTiP+wvxOFwOHx9EMCN0j/qTzzxhIwaNUoqVKggV65ckZMnT5rS9XXr1km5cuXkk08+kVq1ajl/5+rVq2bRG4as2rFjhzRo0EBmz56drVLny5cvm8f8+fObRz2upk2byuLFi+WRRx7J1rnm5Nj0eqSkpEiBAgXEX2ngCQ0Nlc8//zzTbZOTkyVPnjySL1++bL9PjRo15KabbjKfAf5LE/ydO3ea74/eROr/E64PAH9DrCfWZxWx3t2KFSukY8eO0rp1a/Od1M/gX//6l2zcuFGGDx8uI0eO9PUhwotCvblzILe1atVK6tev73w+ZMgQ2bBhgzz00EPy8MMPy7fffisFCxY06/SPnS7edPHiRSlUqJAz+PtKToJlbjt9+rRUq1YtS9v6883Mjbpw4YIULlw4V99Ta1luueUWc2OlN0kA4M+I9Wkj1geO3I711atXl4MHD0r58uWdrz3zzDPSokULee2112Tw4MG5fu+B3EOTfthes2bNZNiwYfLjjz/KvHnzMuzXpzUEjRs3Nk2dihQpIpUrV5aXXnrJrNNSYi1VV1rDYDUptJqMaS2pJktaU6pNpjT4W7+bul+f5dq1a2Yb7cumf2j1RuXYsWNZ6sPmus/Mji2tfn0abJ577jmJjo42QVXP9Y033pDUjX50P3379pVly5aZ89NtNXCsXr06y8G9e/fuEhkZaWpYateuLXPmzLmuj+ORI0fk008/dR77f/7zn3T3mfqaWM08v/jiCxk4cKBprqbXs0OHDvLLL7+4/d6+fftk8+bNzvfJTvN161rMnz/fXC89n3r16smWLVvcttPvmgZS3UZvOkuWLCl/+ctfrjsn67j1eHT7UqVKSdmyZXO0D60t+fvf/+5sqvf000+b2qazZ89K165dpXjx4mbRoJ76M9bvgCb7ABCoiPXEetffI9a7x3ptEeOa7Fvn2b59e9OS4ocffsjy9UHgoYYfQUH7iGmwXbt2rfTo0SPNbTQ4aO2ANgXU5oIa7A4dOmQCi6patap5XZs+9ezZU+69917z+t133+3WR0prHh599FHTJ1oDX0bGjBlj/uC+8MILJlhOmjTJlLZqs2qrdiIrsnJsrjQI6A2HNuXSAF2nTh1Zs2aNDBo0SH7++WeZOHGi2/YaYJYsWWKCUtGiRU1fyU6dOsnRo0dNcErPn3/+aYKsXkcNnhpwtGmjBnANTs8++6w5dq1hHjBggAmAemOiNJhlV79+/UygGzFihAmWej31fa0+avpct9EbvH/84x/mtcw+o9Q0YOv+NODqd0T
7CT744IOyfft2Z+14YmKifPnll+Z7oOekxzJjxgxzLfbv329uEF3pddXz1c9Pb85ysg89L72Z1GZ5W7dulXfeecfcDOg+tJmr9ptcuXKl6cOnx6k3BgBgJ8R6d8R6Yn1msV67xCjt/gAb0z78QKCbPXu2FmM6EhMT090mIiLCUbduXefzESNGmN+xTJw40Tz/5Zdf0t2H7l+30fdL7f777zfrZs6cmeY6XSwbN240295yyy2OpKQk5+uLFi0yr0+ePNn5Wvny5R1xcXGZ7jOjY9Pf1/1Yli1bZrZ95ZVX3LZ75JFHHCEhIY5Dhw45X9Pt8ufP7/ba119/bV6fOnWqIyOTJk0y282bN8/52uXLlx0xMTGOIkWKuJ27Hl+bNm0y3F9618T6/Fu0aOFISUlxvj5gwABH3rx5HWfPnnW+Vr16dbfrlh36Hrrs2LHD+dqPP/7oCAsLc3To0MH52sWLF6/73YSEBPO7c+fOve64Gzdu7Lh69arb9tndR2xsrNu56zXWz7JXr17O1/Q9ypYtm+H538j1AQBvItYT64n1non16rfffnOUKlXKce+992ZyRRDoaMOJoKElvRmN4KslpOrjjz82g97khJYCazO7rNKSVy1Ft+igPqVLlzals96k+8+bN68puXalJe4a61atWuX2utZE3H777c7nWjMSHh6eaRMwfR8tiX7sscfc+hjq++rUPFqC7kla4+HadFNrP7QppTaZ85SYmBjTtM+iJert2rUztSb6Xsq1xkYHUdLaoIoVK5rv2K5du67bp9ZE6efhKrv70Nob13Nv2LCh+Sz1dYu+h/Z7pekeALsi1v8PsT7n7B7r9bvfuXNn0wJj6tSpWbwqCFQk/AgaGnRcA25qf/3rX+Wee+4x88Nq0y9tXrVo0aJs3RDowGfZGbRHp0FzpX/E9Q99Rn3aPEGDok5llPp6aJM7a70rDXSpaXO633//PdP30XNM3T88vfe5UamPU49RZXac2ZH6M1N33HGHGbTJ6kOozRu1yZ7VZ1KbymkzPg2s586du+73tfljatndR+pzj4iIMI/6+6lf9+T1AAB/Qqz/H2J9ztk91mvXAB2f4b333jPjLcDe6MOPoPDTTz+ZP5waYNOjpaw6IIv2ddMBZfQPofbf0oGAtD9g6lLZ9PbhaakHG7JoCXNWjskT0nsff5vV01+OUwOpTpnUv39/U0uggVc/R72xTOumMq3vTXb3kd65p/W6v31uAOAJxHp7xNBAOc5AjfXa/1/HJBg3bpwZ9wL2R8KPoKADxajY2NgMt9PS6ebNm5tlwoQJZvATHfBFbwy0qVt6ATmndIqU1H+cddAb1zmEteRaS3pT0xLz2267zfk8O8emI7V+9tlnptmja8n/d99951zvCbqfPXv2mKDlWvLv6ffJjhv9DFN/Zur77783A+tYgw999NFHEhcXJ2+++aZzm0uXLqX5OabHE/sAgGBCrHdHrM85u8b6adOmmZkrtIBBB5FEcKBJP2xP5+YdPXq0aUql/ZXSc+bMmete0xFtlU5Zoqw5Sj31h3ju3LlufQ31D/+JEyfM6L8W7U+nI7HqtCuWFStWXDelT3aOrXXr1qbW4K233nJ7XUfs1SDp+v43Qt9HR4C1Rs5VV69eNf3FtJ/l/fffL7lNr9ONfH4JCQlu/er0c9C+oC1btnSWsOtj6pJ1PWer319WeGIfABAsiPXXI9YT611Zsw7o/w8t6ELwoIYftqID0GiJsgaaU6dOmRsAnW9XS5c/+eQTM5dqenSqG23m16ZNG7O9Tp2jTZ50mhSdr9cKyDqQysyZM01puQYUHTAlrX5ZWVGiRAmzbx38R49Xp5LRpoiu0wlpP0O9OdDpYP7v//5PDh8+bOYYdh1YJ7vH1rZtW2natKmp0dA+hNp/S5syajDTUt/U+76RgXXefvttMzWPzlmsc+Pquej0R3quGfWz9BYdhEenvHnllVfMtdb5cLUpZ1bpNDdae+Q6VY/VRM6iUz5
pTZM2zatWrZq5cdBaloymNUrNE/vIKv3eW/MLa99EnS5Ir4/SeaZ1AQB/Qawn1meGWO9OpxPUwSN1v9qyZf78+W7rdWpH15YksBcSftiKDnyidDAdDbA1a9Y0wUaDbGYBR+eq1YD4/vvvy6+//moGTtFSaf3jbg2KoqPOzpkzR4YMGSK9evUyNxva9yqnNwE6X7A2gxs7dqwp/dc/whpUXOdd1YCjTb20NFYDtI68qqX+1hy2luwcmza505sivV5a4qvbaYDWeVtT7/dGaH+1TZs2yYsvvmiOLSkpSSpXrmzeT28MfEHPWZtIjh8/3lxz/YyzcxOg22s/O/1e6NzEGqDj4+PdmmZOnjzZlNprQNWmeTpAlAbwzJqZuvLEPrJKb5Zdb2LUsGHDzKPOc0zCD8CfEOuJ9Zkh1rvbv3+/aT2ihfpPPvnkdev1syLht68QnZvP1wcBAIFAm0D26dPnuuaRAADAHoj1sBv68AMAAAAAYEM06QcQ9HSwocyaK1pNPQEAQOAh1iNYkfADCHqlS5fOcL1OmaN99wAAQGAi1iNYkfADCHo6unNGypQpYx4Z8gQAgMBErEewYtA+AAAAAABsiEH7AAAAAACwIZr0Z0FKSoocP37czO2qU3UAAOBr2kBP55fWZqg63zZuHPEeAGC3WE/CnwUa/KOjo319GAAAXOfYsWNStmxZXx+GLRDvAQB2i/Uk/FmgJf3WhQ4PD/f14QAAIElJSSY5tWIUbhzxHgBgt1hPwp8FVrM+Df7cAAAA/AlNzz2HeA8AsFusp9MfAAAAAAA2RMIPAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA2R8AMAAAAAYEMk/AAAAAAA2BAJPwAAAAAANkTCDwAAAACADYX6+gAA+Kfu8YnprpvVrUGOfzcrvw8AAPzbjcb6G7nPAJB1JPwAAAAAsp3UA/B/NOkHAAAAAMCGqOEHEFDoLgAAAABkDQk/AFuhQAAAgP8iJgKgST8AAAAAADZEDT+AoMKowIB/GzdunAwZMkSeffZZmTRpknnt0qVL8txzz8nChQslOTlZYmNjZfr06RIZGen8vaNHj0rv3r1l48aNUqRIEYmLi5OxY8dKaCi3OkCgoWUC4DnU8AMAAL+QmJgob7/9ttSqVcvt9QEDBsjy5ctl8eLFsnnzZjl+/Lh07NjRuf7atWvSpk0buXz5snz55ZcyZ84ciY+Pl+HDh/vgLAAA8B8k/AAAwOfOnz8vnTt3lnfffVeKFy/ufP3cuXMya9YsmTBhgjRr1kzq1asns2fPNon91q1bzTZr166V/fv3y7x586ROnTrSqlUrGT16tEybNs0UAgAAEKxo5wbYFM3hAASSPn36mFr6Fi1ayCuvvOJ8fefOnXLlyhXzuqVKlSpSrlw5SUhIkEaNGpnHmjVrujXx12b/2sR/3759Urdu3TTfU7sH6GJJSkry2vkBAOALJPxAkMqsQAAAcov2zd+1a5dp0p/ayZMnJX/+/FKsWDG31zW513XWNq7JvrXeWpce7eM/cuRID50FAAD+hyb9AADAZ44dO2YG6Js/f76EhYXl6nvr4IDaZcBa9FgAALATv074teS9QYMGUrRoUSlVqpS0b99eDhw44LZNkyZNJCQkxG3p1auX2zY6cq82EyxUqJDZz6BBg+Tq1au5fDYAACA1bbJ/+vRpufPOO82I+rrowHxTpkwxP2tNvfbDP3v2rNvvnTp1SqKioszP+qjPU6+31qWnQIECEh4e7rYAAGAnfp3wa8DXPn06KM+6detMH76WLVvKhQsX3Lbr0aOHnDhxwrmMHz/euY6RewEA8F/NmzeXvXv3yu7du51L/fr1zQB+1s/58uWT9evXO39HC/+1MD8mJsY810fdhxYcWPS+QRP4atWq+eS8AADwB37dh3/16tVuzzVR1xp6rQ247777nK9rzX16JfjWyL2fffaZqSXQ0Xt15N4XXnhBXn75ZdM
vEAAA+Ia24qtRo4bba4ULF5aSJUs6X+/evbsMHDhQSpQoYZL4fv36mSRfB+xTWhmgiX2XLl1Mob/22x86dKipNNBafAAAgpVfJ/ypaf86pQHflfb706l4NOlv27atDBs2zBQCqJyM3MuovYB3B/zL6PeZPQBAahMnTpQ8efJIp06dTHzWOD59+nTn+rx588qKFStMbNeCAC0wiIuLk1GjRvn0uAEA8LWASfhTUlKkf//+cs8997jVBPztb3+T8uXLS5kyZWTPnj2m5l6b+i1ZsiTHI/cyai8AAL6zadMmt+c6mN+0adPMkh69F1i5cmUuHB0AAIEjYBJ+bZb3zTffyOeff+72es+ePZ0/a01+6dKlTX/Aw4cPy+23357jUXu16aBrDX90dPQNHD0AAAAAALkrIBL+vn37mqZ6W7ZskbJly2a4bcOGDc3joUOHTMKvzfy3b9+erZF7tb8fff4AZBddFQAAwdRFD4D/8+tR+h0Oh0n2ly5dKhs2bJAKFSpk+js6oq/Smn7FyL0AAAAAgGAU6u/N+BcsWCAff/yxGcXX6nMfEREhBQsWNM32dX3r1q3NaL7ah3/AgAFmBP9atWqZbRm5FwAAAAAQjPw64Z8xY4Z5bNKkidvrs2fPlm7dupkp9XS6vUmTJsmFCxdMP3sdwVcTegsj9wLwFJo+AgAAIJCE+nuT/oxogr958+ZM98PIvbBrgkm/cAAAAAABmfADAAAASB+tzwAE7KB9AAAAAAAgZ0j4AQAAAACwIZr0AwAAAPAouhoA/oGEH/CyjAIeg+4BAAAA8Baa9AMAAAAAYEPU8AMAAACwDVpXAv9Dwg8AftCXkRsQAAAAeBoJPwD8fwwwBAAAADuhDz8AAAAAADZEwg8AAAAAgA3RpB8IYDRBBwAAAJAeEn7Ah0jYAQBARrhXuB7XBMg6mvQDAAAAAGBDJPwAAMCnZsyYIbVq1ZLw8HCzxMTEyKpVq5zrmzRpIiEhIW5Lr1693PZx9OhRadOmjRQqVEhKlSolgwYNkqtXr/rgbAAA8B806YctMMc5AASusmXLyrhx46RSpUricDhkzpw50q5dO/nqq6+kevXqZpsePXrIqFGjnL+jib3l2rVrJtmPioqSL7/8Uk6cOCFdu3aVfPnyyauvvuqTcwIAwB+Q8APwK8HaL49CKwSztm3buj0fM2aMqfXfunWrM+HXBF8T+rSsXbtW9u/fL5999plERkZKnTp1ZPTo0fLCCy/Iyy+/LPnz58+V8wAAwN+Q8AMAAL+htfWLFy+WCxcumKb9lvnz58u8efNM0q8FBMOGDXPW8ickJEjNmjVNsm+JjY2V3r17y759+6Ru3bppvldycrJZLElJSV49NwC+RwE7gg0JPwAA8Lm9e/eaBP/SpUtSpEgRWbp0qVSrVs2s+9vf/ibly5eXMmXKyJ49e0zN/YEDB2TJkiVm/cmTJ92SfWU913XpGTt2rIwcOdKr5wUAgC+R8AMAAJ+rXLmy7N69W86dOycfffSRxMXFyebNm03S37NnT+d2WpNfunRpad68uRw+fFhuv/32HL/nkCFDZODAgW41/NHR0Td8LgAA+AsSfgAIcDRPhB1oP/uKFSuan+vVqyeJiYkyefJkefvtt6/btmHDhubx0KFDJuHXZv7bt2932+bUqVPmMb1+/6pAgQJmAQDArpiWDwAA+J2UlBS3/vWutCWA0pp+pV0BtEvA6dOnndusW7fOTPFndQsAACAYUcMPAAB8SpvWt2rVSsqVKyd//PGHLFiwQDZt2iRr1qwxzfb1eevWraVkyZKmD/+AAQPkvvvuk1q1apnfb9mypUnsu3TpIuPHjzf99ocOHSp9+vShBh8AENRI+IFMmkTTHBoAvEtr5rt27SonTpyQiIgIk8hrsv/AAw/IsWPHzHR7kyZNMiP3ax/7Tp06mYTekjdvXlmxYoUZlV9r+wsXLmzGABg1apRPzwsAAF8j4QcAAD41a9asdNdpgq+D92VGR/F
fuXKlh48MAIDARh9+AAAAAABsiBp+AAhydGkBAACwJ2r4AQAAAACwIRJ+AAAAAABsiCb9AAAAgB92qwKAG0UNPwAAAAAANkQNPwDAaxgQEAAAwHdI+BEUaC4HAAAAINjQpB8AAAAAABuihh8AAgCtVAAgcPE3HICvkPADgM1xowkAABCcaNIPAAAAAIANkfADAAAAAGBDft2kf+zYsbJkyRL57rvvpGDBgnL33XfLa6+9JpUrV3Zuc+nSJXnuuedk4cKFkpycLLGxsTJ9+nSJjIx0bnP06FHp3bu3bNy4UYoUKSJxcXFm36Ghfn36CBA0lwYAAADgj/w64928ebP06dNHGjRoIFevXpWXXnpJWrZsKfv375fChQubbQYMGCCffvqpLF68WCIiIqRv377SsWNH+eKLL8z6a9euSZs2bSQqKkq+/PJLOXHihHTt2lXy5csnr776qo/PEAAAAECgVOTM6tYg144FsH3Cv3r1arfn8fHxUqpUKdm5c6fcd999cu7cOZk1a5YsWLBAmjVrZraZPXu2VK1aVbZu3SqNGjWStWvXmgKCzz77zNT616lTR0aPHi0vvPCCvPzyy5I/f/7r3ldbCuhiSUpKyoWzBQAAAAAgSPvwa4KvSpQoYR418b9y5Yq0aNHCuU2VKlWkXLlykpCQYJ7rY82aNd2a+Guzf03i9+3bl+b7aHN/bS1gLdHR0V4+MwAAAAAAgjThT0lJkf79+8s999wjNWrUMK+dPHnS1NAXK1bMbVtN7nWdtY1rsm+tt9alZciQIaZwwVqOHTvmpbMCAAAAACAIm/S70r7833zzjXz++edef68CBQqYBQAAAACAQBUQCb8OxLdixQrZsmWLlC1b1vm6DsR3+fJlOXv2rFst/6lTp8w6a5vt27e77U/XW+sQGIOkMEAKAAAAANioSb/D4TDJ/tKlS2XDhg1SoUIFt/X16tUzo+2vX7/e+dqBAwfMNHwxMTHmuT7u3btXTp8+7dxm3bp1Eh4eLtWqVcvFswEAAGmZMWOG1KpVy8RmXTR2r1q1ym0KXm3pV7JkSTO9bqdOnZyF9xaN/TorT6FChcwAv4MGDTIz/AAAEMz8uoZfg7uOwP/xxx9L0aJFnX3udSC9ggULmsfu3bvLwIEDzUB+epPQr18/c6OgI/QrncZPE/suXbrI+PHjzT6GDh1q9k2z/dwVqPPVB+pxA57A9ETIDdp6b9y4cVKpUiVT2D9nzhxp166dfPXVV1K9enWm4AUAwI4Jv5b4qyZNmri9rlPvdevWzfw8ceJEyZMnjynt16n0dAT+6dOnO7fNmzev6Q7Qu3dvUxBQuHBhiYuLk1GjRuXy2QAAgLS0bdvW7fmYMWPMPYBOsauFAd6YghcAgGDg1wm/lvJnJiwsTKZNm2aW9JQvX15Wrlzp4aMDAACeprX1WpN/4cIFU1Cf2RS8mvCnNwWvFvbrFLx169ZN8720okAXi07ZCwAZoeUbAo1f9+EHAADBQcfb0f752t2uV69eZvwe7ZLnrSl41dixY00XAWuJjo72yrkBAOArJPwAAMDnKleuLLt375Zt27aZmnntfqfN9L1pyJAhcu7cOedy7Ngxr74fAAC5za+b9AMAgOCgtfgVK1Z0zsKTmJgokydPlr/+9a9em4JXWxMwgC8AwM5I+AEAgN9JSUkx/etdp+DVAXrTm4JXB/rTKXh1Sj7FFLwA/K2PP/374Qsk/AAAwKe0aX2rVq3MQHx//PGHGZF/06ZNsmbNGqbgBQDgBpDwAwAAn9Ka+a5du8qJEydMgl+rVi2T7D/wwANmPVPwAgCQMyT8AADAp2bNmpXheqbgBQAgZ0j4YYs5TwEAAAAA7piWDwAAAAAAGyLhBwAAAADAhmjSDwAAANwAuh4C8FfU8AMAAAAAYEMk/AAAAAAA2BAJPwAAAAAANkQffgCAX/Z5ndWtQa4dCwAAgB1Rww8AAAAAgA2R8AMAAAAAYEMk/AAAAAAA2BB9+OGxPrf
0twUAAAAA/0ENPwAAAAAANkTCDwAAAACADZHwAwAAAABgQyT8AAAAAADYEAk/AAAAAAA2xCj9yPIo/AAAAACAwEENPwAAAAAANkTCDwAAAACADZHwAwAAnxo7dqw0aNBAihYtKqVKlZL27dvLgQMH3LZp0qSJhISEuC29evVy2+bo0aPSpk0bKVSokNnPoEGD5OrVq7l8NgAABEHC/8MPP3hr1wAAwA94KtZv3rxZ+vTpI1u3bpV169bJlStXpGXLlnLhwgW37Xr06CEnTpxwLuPHj3euu3btmkn2L1++LF9++aXMmTNH4uPjZfjw4R45RgAAApHXEv6KFStK06ZNZd68eXLp0iVvvQ0AAPART8X61atXS7du3aR69epSu3Ztk6hrbf3OnTvdttOa+6ioKOcSHh7uXLd27VrZv3+/OZY6depIq1atZPTo0TJt2jRTCAAAQDDyWsK/a9cuqVWrlgwcONAE5aefflq2b9/urbcDAAC5zFux/ty5c+axRIkSbq/Pnz9fbrrpJqlRo4YMGTJELl686FyXkJAgNWvWlMjISOdrsbGxkpSUJPv27UvzfZKTk8161wUAADvxWsKvpeuTJ0+W48ePy/vvv2+a3jVu3NgE6QkTJsgvv/zirbcGAAC5wBuxPiUlRfr37y/33HOP2Y/lb3/7m6m937hxo0n2//nPf8rjjz/uXH/y5Em3ZF9Zz3VdemMHREREOJfo6OhsHy8AAEE9aF9oaKh07NhRFi9eLK+99pocOnRInn/+eRNUu3btam4OAABA4PJkrNe+/N98840sXLjQ7fWePXuaGnutxe/cubPMnTtXli5dKocPH87xcWvBgbYmsJZjx47leF8AAPijUG+/wY4dO0ypvwbuwoULmxuA7t27y08//SQjR46Udu3a0dQfAHCd7vGJGa6f1a1Brh0LcifW9+3bV1asWCFbtmyRsmXLZrhtw4YNzaMWLtx+++2mS0Hq9zh16pR51HVpKVCggFkAALArryX82pRv9uzZZlqd1q1bm5J4fcyT57+NCipUqGAG5bn11lu9dQgAAMCLPBXrHQ6H9OvXz9TYb9q0yfxeZnbv3m0eS5cubR5jYmJkzJgxcvr0aTMln9IR/3Vgv2rVqnngbGFnFDACsCuvJfwzZsyQJ5980oy6awXj1DQgz5o1y1uHAAAI0ht0bs5zh6divTbjX7BggXz88cdStGhRZ5977VdfsGBB02xf12thQsmSJWXPnj0yYMAAue+++8yggUqn8dPEvkuXLma6Pt3H0KFDzb6pxQcABCuvJfxaql6uXDlnKb9rKb72kdN1+fPnl7i4OG8dAgAA8CJPxXotOFBNmjRxe11bD2hhgu7js88+k0mTJsmFCxfM2ACdOnUyCb0lb968pjtA7969TW2/di3Q9x01apRHzxkAgEDitYRf+9PpID1WszrLmTNnTFO9a9euZWk/2o/v9ddfN3Px6v60uV/79u2d6/VGYM6cOW6/o4P66Jy+ru+pTQWXL19ubkr0JkFHFS5SpMgNnycABLPMmsHC3jwV67WAICOa4G/evDnT/ZQvX15WrlyZpfcEgNxG1xHYapT+9IL3+fPnJSwsLMv70ZL82rVry7Rp09Ld5sEHHzQ3HNbywQcfuK3X0Xx1Dl6tibAGA9LRfgEAgO9jPQAACJAa/oEDB5rHkJAQGT58uBQqVMi5Tkv6t23bZubtzapWrVqZJSPaNy+9EXi//fZbU9ufmJgo9evXN69NnTrV9AN84403pEyZMlk+FgAA4PlYDwAAAiTh/+qrr5yl/nv37jX97iz6s9bW63Q9nqQj+mpzwuLFi0uzZs3klVdeMYP6qISEBClWrJgz2VctWrQwTfv1hqRDhw7X7S85OdkslqSkJI8eLwAAgcwXsR4AAPhBwr9x40bz+MQTT5h+8jodjjdpc/6OHTuavoI6iu9LL71kWgRooq8D+Ogovan7FoaGhkqJEiWcowCnNnbsWDNvMLKHvrwAEBx
yO9YDvsY9DoBA5bVB+3Rk3dzw6KOPOn+uWbOmmZ5HBxHSWv/mzZvnaJ9DhgxxNle0avh1wCAAAJD7sR4AAPhBwq817fHx8aakX3/OyJIlS8QbbrvtNrnpppvk0KFDJuHXvv2nT5922+bq1atmBOH0+v3rmADM2QsAgH/GegAA4IOEPyIiwgzgY/3sCz/99JP89ttvUrp0afNc5+I9e/asmdavXr165rUNGzZISkqKNGzY0CfHCABAoPKHWA8AAHyQ8Ls27fNUMz+d2kdr6y1HjhyR3bt3mz74umhf+06dOpnaeu3DP3jwYKlYsaLExsaa7atWrWr6+ffo0UNmzpwpV65ckb59+5quAIzQDwCA72M9ACDzsSJmdWuQa8cC+/BaH/4///zTjN5rTdXz448/ytKlS6VatWrSsmXLLO9nx44d0rRpU+dzq299XFyczJgxQ/bs2SNz5swxtfiawOu+R48e7dYkf/78+SbJ1yb+Ojq/FhBMmTLFo+cLAPAf3DTlDk/FegAAEGAJf7t27Uzfvl69eplk/K677jJT9fz6668yYcIE6d27d5b206RJE3MzkZ41a9Zkug9tCbBgwYJsHT8AAMidWA/kBkbaBxCMvJbw79q1SyZOnGh+/uijj0yTe52391//+pcMHz6cmwAfIdgBADyFWA8AgH/L460dX7x4UYoWLWp+Xrt2rakB0Ob0jRo1Mk3+AABAYCPWAwAQpDX8OnDesmXLpEOHDqbZ/YABA8zrOkWeTuUDAIC/YgyArCHWAwAQpDX82pTv+eefl1tvvdVMf6fT41k1AHXr1vXW2wIAgFxCrAcAIEhr+B955BFp3LixnDhxQmrXru18XUfK15oAAAAQ2Ij1AAAEacKvdPAeXVzpCL4AAMAeiPUAAARhwn/hwgUZN26crF+/3vTlS0lJcVv/ww8/eOutAQDIEDOWeAaxHgCAIE34n3rqKdm8ebN06dJFSpcuLSEhId56K9u50cGiuJEFAOQGYj0AAEGa8K9atUo+/fRTueeee7z1FgAAwIeI9QAABOko/cWLF5cSJUp4a/cAAMDHiPUAAARpwj969GgzXc/Fixe99RYAAMCHPBXrx44dKw0aNJCiRYtKqVKlpH379nLgwAG3bS5duiR9+vSRkiVLSpEiRaRTp05y6tQpt22OHj0qbdq0kUKFCpn9DBo0SK5evXpDxwYAQCDzWpP+N998Uw4fPiyRkZFmft58+fK5rd+1a5e33hoAAOQCT8V6HQdAk3lN+jVBf+mll6Rly5ayf/9+KVy4sNlmwIABpvvA4sWLJSIiQvr27SsdO3aUL774wqy/du2aSfZ1xoAvv/zSTBXYtWtXc0yvvvqqF84eAHJXRuN0ZTbGF4KX1xJ+LZ0HAMCOuOnybKxfvXq12/P4+HhTQ79z506577775Ny5czJr1ixZsGCBNGvWzGwze/ZsqVq1qmzdulUaNWoka9euNQUEn332mSmAqFOnjmmB8MILL8jLL78s+fPn98ixAgAQSLyW8I8YMcJbuwYAAH7AW7FeE3xljQ+gif+VK1ekRYsWzm2qVKki5cqVk4SEBJPw62PNmjVNsm+JjY2V3r17y759+6Ru3brXvU9ycrJZLElJSV45HwAAbJfwq7Nnz8pHH31kmvtpPzoN3Nq8T4PxLbfc4s23BgAAucDTsT4lJUX69+9vRv6vUaOGee3kyZOmhr5YsWJu2+p76DprG9dk31pvrUtv7ICRI0dm+xjhn5iWGAByMeHfs2ePKYnXfnb/+c9/pEePHuYmYMmSJWZQnblz53rrrQEAQC7wRqzXvvzffPONfP755+JtQ4YMkYEDB7rV8EdHR3v9fQEACPhR+jWAduvWTQ4ePChhYWHO11u3bi1btmzx1tsCAIBc4ulYrwPxrVixQjZu3Chly5Z1vq4D8V2+fNm0JnClo/TrOmub1KP2W8+tbVIrUKCAhIeHuy0AANiJ1xL+xMREefrpp697XZv3pde0DgAABA5
PxXqHw2GS/aVLl8qGDRukQoUKbuvr1atnRttfv3698zWdtk9bEcTExJjn+rh37145ffq0c5t169aZJL5atWo5PEMAAAKb15r0a6l5WoPffP/993LzzTd7620BAEAu8VSs12b8OgL/xx9/LEWLFnUWFmhXgYIFC5rH7t27mxYF2mVAk/h+/fqZJF8H7FM6jZ8m9l26dJHx48ebfQwdOtTsW48TAIBg5LUa/ocfflhGjRplRtVVISEhpiRep8fp1KmTt94WAADkEk/F+hkzZpiR+Zs0aSKlS5d2Lh9++KFzm4kTJ8pDDz1k9qtT9WkzfR0rwJI3b17THUAftSDg8ccfl65du5rjAwAgWIU4tB2dF2jgfuSRR0xzv/Pnz0uZMmVMabsG4ZUrV0rhwoUlUGjthdYu6DnlRv8+RpkFgMA1q1sDW8Ymu8d6f7mmyDnunxDMciv2IPDiktea9OuBad+5L774Qr7++mtzI3DnnXe6zaELAAACF7EeAAD/5pWEX+fQjY+PN03tdJoebeKnA/Bo8zttUKDPAQBA4CLWAwAQhH34Nchrn76nnnpKfv75Z6lZs6ZUr15dfvzxRzN1T4cOHTz9lgAAIBcR6wEACNIafi3t17l3deqcpk2buq3TqXbat28vc+fONQPpAACAwEOsBwAgSGv4P/jgA3nppZeuuwFQzZo1kxdffFHmz5/v6bcFAAC5hFgPAECQJvx79uyRBx98MN31rVq1MgP7AACAwESsBwAgSBP+M2fOSGRkZLrrdd3vv//u6bcFAAC5hFgPAECQ9uG/du2ahIamv9u8efPK1atXPf22AAAglxDrAcC/dI9PzHD9rG4Ncu1YYPOEX0fu1RF6CxQokOb65ORkT78lAADIRcR6AACCNOGPi4vLdBtG7QUAIHAR6+GPNZgAgFxI+GfPnu3pXQIAAD9CrAcAIEgH7QMAAAAAAL5Hwg8AAAAAgA2R8AMAAAAAYEMk/AAAAAAA2BAJPwAAAAAANkTCDwAAAACADfl9wr9lyxZp27atlClTRkJCQmTZsmVu6x0OhwwfPlxKly4tBQsWlBYtWsjBgwfdtjlz5ox07txZwsPDpVixYtK9e3c5f/58Lp8JAAAAAAC5x+8T/gsXLkjt2rVl2rRpaa4fP368TJkyRWbOnCnbtm2TwoULS2xsrFy6dMm5jSb7+/btk3Xr1smKFStMIULPnj1z8SwAAAAAAMhdoeLnWrVqZZa0aO3+pEmTZOjQodKuXTvz2ty5cyUyMtK0BHj00Ufl22+/ldWrV0tiYqLUr1/fbDN16lRp3bq1vPHGG6blQGrJyclmsSQlJXnt/AAAAAAACMoa/owcOXJETp48aZrxWyIiIqRhw4aSkJBgnuujNuO3kn2l2+fJk8e0CEjL2LFjzX6sJTo6OhfOBgCA4JRZ971u3bqZ112XBx980G0buu8BAGCzhF+TfaU1+q70ubVOH0uVKuW2PjQ0VEqUKOHcJrUhQ4bIuXPnnMuxY8e8dg4AAAS7zLrvKU3wT5w44Vw++OADt/V03wMAIACb9PtCgQIFzAIAAHzbfc+icTkqKirNdTnpvgcAQDAI6Bp+K/CfOnXK7XV9bq3Tx9OnT7utv3r1qmn6l96NAwAA8C+bNm0yLfYqV64svXv3lt9++825Lifd95SO16Pj9LguAADYSUAn/BUqVDBJ+/r1652vabDW4B4TE2Oe6+PZs2dl586dzm02bNggKSkppq8/AADwb9qcXwfl1Xj/2muvyebNm02LgGvXruW4+55izB4AgN35fZN+HXDn0KFDbgP17d692wTxcuXKSf/+/eWVV16RSpUqmQKAYcOGmaZ77du3N9tXrVrV3Cj06NHDTN135coV6du3rxnB35dN/LrHJ/rsvQEACCQasy01a9aUWrVqye23325q/Zs3b57j/eqYPQMHDnSrNCDpBwDYid8n/Dt27JCmTZs6n1uBOS4uTuLj42Xw4MFmsB8dmEdr8hs3bmz68YW
FhTl/Z/78+SbJ15sCbd7XqVMnmTJlik/OBwBgb5kV6M7q1iDXjsWubrvtNrnppptMhYDG9px232PMHgCA3fl9wt+kSRNxOBzprtepeUaNGmWW9GhrgAULFnjpCAEAQG766aefTB/+0qVLX9d9r169euY1uu8BABAACT8AALC3jLrv6TJy5EjTOk9r6w8fPmxa91WsWFFiY2P9uvseAAC+RsIPAAD8tvvejBkzZM+ePTJnzhxTi68JfMuWLWX06NFuzfHpvmcPjHEEAJ5Fwg8AAPy6+96aNWsy3Qfd9wAAsNm0fAAAAAAAIG3U8AMAACBX0GQfAHIXNfwAAAAAANgQNfwAAAAAEMSta2Z1a5Brx4LcRQ0/AAAAAAA2RMIPAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA2R8AMAAAAAYEOhvj4AAAAAAIDvdI9PzPHvzurWwKPHAs+ihh8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4AQAAAACwIRJ+AAAAAABsiIQfAAAAAAAbIuEHAAA+tWXLFmnbtq2UKVNGQkJCZNmyZW7rHQ6HDB8+XEqXLi0FCxaUFi1ayMGDB922OXPmjHTu3FnCw8OlWLFi0r17dzl//nwunwkAAP4l1NcHAAAAgtuFCxekdu3a8uSTT0rHjh2vWz9+/HiZMmWKzJkzRypUqCDDhg2T2NhY2b9/v4SFhZltNNk/ceKErFu3Tq5cuSJPPPGE9OzZUxYsWOCDMwKA4NE9PjHD9bO6Nci1Y8H1SPgBAIBPtWrVyixp0dr9SZMmydChQ6Vdu3bmtblz50pkZKRpCfDoo4/Kt99+K6tXr5bExESpX7++2Wbq1KnSunVreeONN0zLgbQkJyebxZKUlOSV8wMAwFdI+AEAgN86cuSInDx50jTjt0REREjDhg0lISHBJPz6qM34rWRf6fZ58uSRbdu2SYcOHdLc99ixY2XkyJG5ch7BIrOaPgBA7qIPPwAA8Fua7Cut0Xelz611+liqVCm39aGhoVKiRAnnNmkZMmSInDt3zrkcO3bMK+cAAICvUMMPAACCUoECBcwCAIBdUcMPAAD8VlRUlHk8deqU2+v63Fqnj6dPn3Zbf/XqVTNyv7UNAADBiIQfAAD4LR2VX5P29evXuw2up33zY2JizHN9PHv2rOzcudO5zYYNGyQlJcX09QcAIFjRpB8AAPjU+fPn5dChQ24D9e3evdv0wS9Xrpz0799fXnnlFalUqZJzWj4deb99+/Zm+6pVq8qDDz4oPXr0kJkzZ5pp+fr27WsG9EtvhH7kHAPzAUDgIOEHAAA+tWPHDmnatKnz+cCBA81jXFycxMfHy+DBg+XChQvSs2dPU5PfuHFjMw1fWFiY83fmz59vkvzmzZub0fk7deokU6ZM8cn5AADgL0j4AQCATzVp0kQcDke660NCQmTUqFFmSY+2BliwYIGXjhAAgMBEH34AAAAAAGyIhB8AAAAAABsK+IT/5ZdfNk39XJcqVao411+6dEn69OkjJUuWlCJFipg+famn9gEAAAAAwG4CPuFX1atXlxMnTjiXzz//3LluwIABsnz5clm8eLFs3rxZjh8/Lh07dvTp8QIAAAAA4G22GLQvNDTUzNGb2rlz52TWrFlmEJ9mzZqZ12bPnm2m79m6das0atTIB0cLAAAAAID32aKG/+DBg2ae3dtuu006d+4sR48eNa/v3LnTzMXbokUL57ba3F/n9E1ISEh3f8nJyZKUlOS2AAAAAAAQSAI+4W/YsKGZo1fn450xY4YcOXJE7r33Xvnjjz/k5MmTkj9/filWrJjb70RGRpp16Rk7dqxEREQ4l+jo6Fw4EwAAAAAAPCfgm/S3atXK+XOtWrVMAUD58uVl0aJFUrBgwRztc8iQITJw4EDnc63hJ+kHAAAAAASSgK/hT01r8++44w45dOiQ6dd
/+fJlOXv2rNs2Okp/Wn3+LQUKFJDw8HC3BQAAAACAQBLwNfypnT9/Xg4fPixdunSRevXqSb58+WT9+vVmOj514MAB08c/JibG14cKAAAAAEGre3xihutndWuQa8diVwGf8D///PPStm1b04xfp9wbMWKE5M2bVx577DHT/7579+6meX6JEiVMTX2/fv1Mss8I/QAAAAAAOwv4hP+nn34yyf1vv/0mN998szRu3NhMuac/q4kTJ0qePHlMDb+Ovh8bGyvTp0/39WEDAAAAAOBVAZ/wL1y4MMP1YWFhMm3aNLMAAADgxprYAkB28DfFt2w3aB8AAAAAACDhBwAAAADAlkj4AQAAAACwIRJ+AAAAAABsiIQfAAAAAAAbIuEHAAAAAMCGSPgBAAAAALAhEn4AAOD3Xn75ZQkJCXFbqlSp4lx/6dIl6dOnj5QsWVKKFCkinTp1klOnTvn0mAEA8DUSfgAAEBCqV68uJ06ccC6ff/65c92AAQNk+fLlsnjxYtm8ebMcP35cOnbs6NPjBQDA10J9fQAAAABZERoaKlFRUde9fu7cOZk1a5YsWLBAmjVrZl6bPXu2VK1aVbZu3SqNGjVKc3/JyclmsSQlJXnx6AEAyH0k/AAAICAcPHhQypQpI2FhYRITEyNjx46VcuXKyc6dO+XKlSvSokUL57ba3F/XJSQkpJvw6++PHDlSgk33+ERfHwIAIJeQ8AMAAL/XsGFDiY+Pl8qVK5vm/Jqo33vvvfLNN9/IyZMnJX/+/FKsWDG334mMjDTr0jNkyBAZOHCgWw1/dHS0BAKSdgDBILO/dbO6Nci1YwlUJPwAAMDvtWrVyvlzrVq1TAFA+fLlZdGiRVKwYMEc7bNAgQJmAQDArhi0DwAABBytzb/jjjvk0KFDpl//5cuX5ezZs27b6Cj9afX5BwAgWJDwAwCAgHP+/Hk5fPiwlC5dWurVqyf58uWT9evXO9cfOHBAjh49avr6AwAQrGjSDwAA/N7zzz8vbdu2Nc34dcq9ESNGSN68eeWxxx6TiIgI6d69u+mPX6JECQkPD5d+/fqZZD+9AfsAAAgGJPwAAMDv/fTTTya5/+233+Tmm2+Wxo0bmyn39Gc1ceJEyZMnj3Tq1MlMtRcbGyvTp0/39WEDAOBTJPwAAMDvLVy4MMP1OlXftGnTzAIAAP6LhB8AAAAAYKtp+5iy779I+AEAAAJs7mkAALKChB8AAAAAEFQFp7OCpAUACT8AAAAAIKh0D5ICgTy+PgAAAAAAAOB51PADAAD4AP30AQDeRg0/AAAAAAA2RMIPAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA0xLR8AAIAXMO0eAMDXqOEHAAAAAMCGSPgBAAAAALAhmvQDAAAAAJCNblmzujWQQEDCDwAAAACAhwoE/KkwgCb9AAAAAADYEAk/AAAAAAA2RMIPAAAAAIANBVXCP23aNLn11lslLCxMGjZsKNu3b/f1IQEAAA8i1gMAEIQJ/4cffigDBw6UESNGyK5du6R27doSGxsrp0+f9vWhAQAADyDWAwAQpKP0T5gwQXr06CFPPPGEeT5z5kz59NNP5f3335cXX3zRbdvk5GSzWM6dO2cek5KSPHY8l/8877F9AQACh6diibUfh8Phkf0FW6zPjXhPrAeA4JTkT7HeEQSSk5MdefPmdSxdutTt9a5duzoefvjh67YfMWKEXlEWFhYWFha/X44dO5aLEdU+sV4R71lYWFhYxOaxPihq+H/99Ve5du2aREZGur2uz7/77rvrth8yZIhpEmhJSUmRM2fOSMmSJSUkJCRHJTPR0dFy7NgxCQ8Pz+FZBBeuWc5w3XKG65Z9XDPfXzct7f/jjz+kTJkyHju+YIr1no73/J/IGa5b9nHNcobrljNct8CP9UGR8GdXgQIFzOKqWLFiN7xf/cD5j5I9XLOc4brlDNct+7hmvr1uERERHjmeYOWNeM//iZzhumU
f1yxnuG45w3UL3FgfFIP23XTTTZI3b145deqU2+v6PCoqymfHBQAAPINYDwBAkCb8+fPnl3r16sn69evdmu3p85iYGJ8eGwAAuHHEegAAgrhJv/bRi4uLk/r168tdd90lkyZNkgsXLjhH8vUmbS6oUwSlbjaI9HHNcobrljNct+zjmuUM1827iPWBh+uWfVyznOG65QzXLfCvWYiO3CdB4q233pLXX39dTp48KXXq1JEpU6ZIw4YNfX1YAADAQ4j1AAAEacIPAAAAAECwCIo+/AAAAAAABBsSfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4vWzatGly6623SlhYmBklePv27b4+JL8yduxYadCggRQtWlRKlSol7du3lwMHDrhtc+nSJenTp4+ULFlSihQpIp06dZJTp0757Jj9zbhx4yQkJET69+/vfI1rlraff/5ZHn/8cXNdChYsKDVr1pQdO3Y41+sYpsOHD5fSpUub9S1atJCDBw9KsLp27ZoMGzZMKlSoYK7H7bffLqNHjzbXycI1E9myZYu0bdtWypQpY/4vLlu2zG19Vq7RmTNnpHPnzhIeHi7FihWT7t27y/nz53P5THAjiPfpI9bfOGJ91hHrs494b/N4r6P0wzsWLlzoyJ8/v+P999937Nu3z9GjRw9HsWLFHKdOnfL1ofmN2NhYx+zZsx3ffPONY/fu3Y7WrVs7ypUr5zh//rxzm169ejmio6Md69evd+zYscPRqFEjx9133+3T4/YX27dvd9x6662OWrVqOZ599lnn61yz6505c8ZRvnx5R7du3Rzbtm1z/PDDD441a9Y4Dh065Nxm3LhxjoiICMeyZcscX3/9tePhhx92VKhQwfHnn386gtGYMWMcJUuWdKxYscJx5MgRx+LFix1FihRxTJ482bkN18zhWLlypeMf//iHY8mSJXpn5Fi6dKnb+qxcowcffNBRu3Ztx9atWx3//ve/HRUrVnQ89thjPjgb5ATxPmPE+htDrM86Yn3OEO/tHe9J+L3orrvucvTp08f5/Nq1a44yZco4xo4d69Pj8menT582/4E2b95snp89e9aRL18+84fH8u2335ptEhISHMHsjz/+cFSqVMmxbt06x/333++8CeCape2FF15wNG7cON31KSkpjqioKMfrr7/ufE2vZYECBRwffPCBIxi1adPG8eSTT7q91rFjR0fnzp3Nz1yz66W+AcjKNdq/f7/5vcTEROc2q1atcoSEhDh+/vnnXD4D5ATxPnuI9VlHrM8eYn3OEO/tHe9p0u8lly9flp07d5qmHJY8efKY5wkJCT49Nn927tw581iiRAnzqNfwypUrbtexSpUqUq5cuaC/jtqMr02bNm7XRnHN0vbJJ59I/fr15S9/+YtpUlq3bl159913neuPHDkiJ0+edLtuERERpmlusF63u+++W9avXy/ff/+9ef7111/L559/Lq1atTLPuWaZy8o10kdt1qffT4turzFj27ZtPjluZB3xPvuI9VlHrM8eYn3OEO/tHe9DvbbnIPfrr7+a/jCRkZFur+vz7777zmfH5c9SUlJM37R77rlHatSoYV7T/zj58+c3/zlSX0ddF6wWLlwou3btksTExOvWcc3S9sMPP8iMGTNk4MCB8tJLL5lr9/e//91cq7i4OOe1Sev/bLBetxdffFGSkpLMTWTevHnN37QxY8aYvmeKa5a5rFwjfdQbU1ehoaEmGeI6+j/iffYQ67OOWJ99xPqcId7bO96T8MOvSrG/+eYbU6KI9B07dkyeffZZWbdunRkcClm/ydQS1VdffdU811J//b7NnDnT3ATgeosWLZL58+fLggULpHr16rJ7925zo66D1XDNAOQEsT5riPU5Q6zPGeK9vdGk30tuuukmU0KWerRUfR4VFeWz4/JXffv2lRUrVsjGjRulbNmyztf1WmlzybNnz7ptH8zXUZvxnT59Wu68805TKqjL5s2bZcqUKeZnLUnkml1PR0y
tVq2a22tVq1aVo0ePmp+ta8P/2f8ZNGiQKfV/9NFHzSjHXbp0kQEDBpgRtxXXLHNZuUb6qP+nXV29etWM5Mt19H/E+6wj1mcdsT5niPU5Q7y3d7wn4fcSbTpUr1490x/GtdRRn8fExPj02PyJjnmhNwBLly6VDRs2mOlAXOk1zJcvn9t11Kl89A93sF7H5s2by969e03pq7VoabY2u7J+5ppdT5uPpp4GSvuqlS9f3vys3z39Y+t63bR5m/apCtbrdvHiRdOvzJUmNvq3THHNMpeVa6SPetOuN/gW/Xuo11n7/sG/Ee8zR6zPPmJ9zhDrc4Z4b/N477XhAGGm6dGRGePj482ojD179jTT9Jw8edLXh+Y3evfubaav2LRpk+PEiRPO5eLFi27Tzuj0PRs2bDDTzsTExJgF/+M6cq/imqU9rVFoaKiZeubgwYOO+fPnOwoVKuSYN2+e23Qq+n/0448/duzZs8fRrl27oJtyxlVcXJzjlltucU7To9PQ3HTTTY7Bgwc7t+Ga/XcU7a+++sosGlYnTJhgfv7xxx+zfI10mp66deuaaaQ+//xzMyo30/IFDuJ9xoj1nkGszxyxPmeI9/aO9yT8XjZ16lTzx1jn59Vpe3TORfyP/mdJa9H5ei36n+SZZ55xFC9e3PzR7tChg7lRQPo3AVyztC1fvtxRo0YNc2NepUoVxzvvvOO2XqdUGTZsmCMyMtJs07x5c8eBAwccwSopKcl8r/RvWFhYmOO2224z888mJyc7t+GaORwbN25M8++Y3kBl9Rr99ttvJuDrvMfh4eGOJ554wtxYIHAQ79NHrPcMYn3WEOuzj3hv73gfov94r/0AAAAAAADwBfrwAwAAAABgQyT8AAAAAADYEAk/AAAAAAA2RMIPAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA2R8AM21aRJE+nfv7+vDwMAAHgR8R5ARkj4AZtasmSJjB49Okvb/uc//5GQkBDZvXu32N3f//53qVevnhQoUEDq1Knj68MBAOCGEO+v9/XXX8tjjz0m0dHRUrBgQalatapMnjzZ14cF+AQJP4LWyy+/bIJebpW+62LZtGmTee+PPvrIa+9ZokQJKVq0qPm5W7ducuutt4o/O3/+vDz11FMSFRVlrk1GtRV6LnpOOfXkk0/KX//61xz/PgAgMNg91rvGe2L9/+zcuVNKlSol8+bNk3379sk//vEPGTJkiLz11ls3cPRAYCLhhy3Ex8ebwGEtYWFhUqZMGYmNjZUpU6bIH3/84ZH3OX78uLl58MeS8dTH5trET282zp07ZxJdvSkoV66cvPPOO87frVChgnmsW7euuX6uNyzp0SDcvn17GTlypNx8880SHh4uvXr1ksuXLzu3Wb16tTRu3FiKFSsmJUuWlIceekgOHz58XU3Dhx9+KNWqVZNZs2ZJo0aNZPr06fLtt9/KLbfcIoUKFZKaNWvKBx984Pb+uu9+/fqZcyxevLhERkbKu+++KxcuXJAnnnjCnGfFihVl1apVbr9XpUoVKVy4sNx22205vtZ2M2DAALnzzjvNTaNeb60J0e+S3pgBgL8g1qd9bK7x/qeffpJXX33Vb+P9448/bmL9r7/+Kj169JC2bduamviM4r0eY3bj/aVLl8w53n///Sbe6/vqttoaAmI+G/3/o5/Jjh07fH048DISftjKqFGj5J///KfMmDHDBAelAUIDyJ49e9y2HTp0qPz555/ZDrQa8LJ7E7B27VqzeFNGx6bBV/+o169fX7766it55plnpHfv3nLgwAGzfvv27ebxs88+kxMnTmQ5IK5fv94k5lqLoQFaf0+PwaLBeODAgSaY6LZ58uSRDh06SEpKitt+XnzxRRN4tKm9fnYPP/ywPPDAA/Lpp5/KN998Iz179pQuXbo4j1OP+4477pA5c+bITTfdZF7Xz1vP6S9/+YvcfffdsmvXLmnZsqX5vYsXLzrfSwsT9KY
R/5OYmCj33nuv+ey0yWPTpk1l3Lhx8uCDD173WQGArxHr0z42TYI1cX7zzTf9Nt5rUq4F7wcPHjS/rz9r7E8r3usx6zmp7Mb7tGK9VnxowTb+W9AfGhrq68NAbnEANjB79myHfp0TExOvW7d+/XpHwYIFHeXLl3dcvHjxht5H96/vo++XFRcuXEjz9Y0bN5r9LF68+IaOJ6Nju//++x3PPvus+VnP/fHHH3dum5KS4ihVqpRjxowZ5vmRI0fM73711VdZfr+4uDhHiRIl3M5R91ekSBHHtWvX0vydX375xbzP3r173d530qRJjgoVKjjatGmT7vvpuueee875XM+vcePGzudXr151FC5c2NGlSxfnaydOnDD7T0hIcL5WvXp187sjRoxw1K5d2+Fvzp8/7/AHb7zxxnXXDgB8iVif9rEFUrwvWbJkhrHeU/HeivWWL774whEaGupYs2aNI9hj/erVqx358+d3DB06NN3/T7AXavhhe82aNZNhw4bJjz/+aPpyZdSvb926dc4maUWKFJHKlSvLSy+9ZNZpqXaDBg3Mz9oszGpSaJUga5OzGjVqmH5j9913n2maZv1u6n59lmvXrplttC+bNjPXmu1jx45lqQ+b6z7TOrbNmzfL/v37zWvadE5Lzy26Xvu2zZ071wxoo+ep9LnDoX//xW3bvn37yrJly8z56WB31atXl59//llq165tztMSExNjmoFb56Al+FrCr83ttLRfmwKq999/3+09tGbmyJEj5hit66o1BVpbo6Xx+lmsWbNGjh496rwm3333ndSqVcs8189AS6p1O202qO+j11NrANTp06edv6d9+fTaaM2CDuqTleaMqa/F/PnzzTWzWiVs2bLFbTv9rmmtim6jgwVpCwutidBjS6t5qh6Pbq+fSdmyZXO0j88//9wMSKjnrt/fp59+2jS3PHv2rHTt2tU0g9Rl8ODB133GabH6gervA4C/C9ZYr8vJkyfNa/r72qTfio1Ka7y1lv2FF14w8Vuvk8pqrNem+iqzeL9161bTdD5v3rxmX9rsXllxW9er3377zS3Wa9NyHXAwrXjvek30nKx4p/vSewptZaDXU+8z9LkV711jvfU+2rR/xIgRpiVAMMf6K1euyLPPPmuW22+/PdNrAXugLQeCgjbz0mCrTe20z1haNDhonzMNKtpcUIPdoUOH5IsvvjDrtV+zvj58+HDT5EybQCttTmbRQNaqVSt59NFHTX8xK+ClZ8yYMeYPuAZiDVKTJk2SFi1amKZ6+oc/q9I6Nt23Nu1LiwYBDSbW4Dma9GsgnDhxolmnj640wGhg1aCkybv2ldQbD+1vn5E2bdqY5ofJyckmEGlfy7Fjx5r9ly9fXtq1a2e2e+WVV8y5awB87rnnZPny5ab5njYv15sADehaKODaX1Dly5fP7fnvv/9ubnz0XPT8dJ/KalKoz7UpoN5Q6DXTGzYdyCc7NGDrmAMacPU7os0Gtem7NjPUmySrifyXX35pvgd6Tnos2vRUb9q0EMb1pknpddXgrZ+fNovMyT70vPRmUgsy9GZI+2zqzYDuQ/twap/OlStXyuuvv26OUz8PV1evXjU3DHqNtVmlNoPVz/quu+7K1vUBAF8Jxlivpk6dmmZs1HiuhQta6K/92bt37y5Lly41Beza7N+1YCS9WN+pUyfTzz4j2mVCE2pNJjt27GiSV03aNcb+61//ktatWzuTS41LmpBrrFeaVOv76DXJarzXeKfvpfvVMQn0d/Pnz++M966xXgsMNP7p/YjGtWCP9Xpt9F5JrwXjGQQRXzcxALzdzM8SERHhqFu3rvO5Nul2/S8wceJE81yboeWkmZ82HdN1M2fOTHOda9Myq5nfLbfc4khKSnK+vmjRIvP65MmTna9p8zxtTpfZPjNq0q9N34oXL+7cdtmyZWbbpk2bmuc///yzed68eXNHSEiI49ChQ85t9XVt+uX62tdff21eL1SokFvTST13q4nfr7/+arb
RZd68eWb9v//9b/O8cuXKZjtt6mc1LdTztJr5PfTQQ44nn3zSuV/dX6VKlRzt2rVzXpPIyEjn+Vmff1hYmGPChAnO3xswYIDb+99ok37rfHbs2OF87ccffzTv26FDB+draTUn1WaG+rtz5851vmYdtzZV1CaKrrK7j9jYWNN00xITE2M+y169ejlf0/coW7as2/cm9b6tRT8j/Z4CgL8g1mfcpF9/P2/evOYcXWN9VFSUuQ6u8T47sf6uu+4yTfrTi/djxowx22kT8dTnrvFRz91q0l+6dGm3Jv0ZxXvrmljnZ33+LVq0cJQrV855nhrr9bx13dKlS52xvn79+qY7w6BBgxzZYddYr90eihYt6nj77bfd9kmTfvujST+Chpb0ZjSCr5aQqo8//jjHA5VpKbA2s8sqLXm1ps5TjzzyiJQuXdqUznqTtf+GDRuaR21eprUMOsqtxrrUpb5aE+Ha9EtrRrS0XUvYtcZAS6F1n1qzrs3gtGmdNinTbbQpnDZB3LBhg2mmr7SkXVsXbNu2Lc3jq1SpkmlyqSXWOkiQNlk7depUlj5j16abVu3HL7/84nxNaw30vbUlgNZKaA2LLqlrE9KjzRi1aZ9FS9S1pYLWZmizTeVaY6PXSGuD9Nrqd0wHF0pNa6K0GaSr7O5DPwfXc9fPVj9Lfd2i76EDOf3www/X/b7OkqDXXJtzalNArWVhlH4AgYZY/z+6f/27r4PdWTTe6/FrfNAabB3ILqNYryPyayzQGJlevLemH9SWA9paQuO9xhFrtHytLU9PTuK9tm5IHeut+GvRloXabU+b8Ou9h8Z8XVzvB4It1msLE+12oS07EVxI+BE0NGC5BtzUdF72e+65x/wh1OZ52rxq0aJF2boh0Cb0VrOyrNBA50r/iOsf+tR9tzxN+4xpMq5BX2n/d21St2LFCvNcf3algS41PU9tVqbnoP0Y9fpp00HtL6n0JsCaZ1dvGnREWG1ipqy+azoOQFq0qZlOE6dTLWmzNt2PTgmUmdQjzmqhg3JNXPU9tZnh22+/Ld9//71p5qiLdj3IyWemdMYA7Sdp3UhoQYI22dOuEnqN9WZLm/Fpk3nXm6vU0yS5yu4+Un9GERER5lF/P/Xr2pwvNb2p05s9vaF57bXXTHNL/VlvmAAgUBDr3WO9dqVzTTI1To4fP978rF3arK516cV6jaOaPDdv3jzdeK/97bX5uCao+uga763jSE9O4n3q47RivSuNk5pAa7cFLVyxFmsMhGCL9dr8X2e20C6V1ngHCB704UdQ0EFs9A+nBtj0aCmrDsiyceNGM6CMDlSjpd86wI32B0xdKpvePjwt9WBDFi1hzuiYtMTdtTbB9bnSoGoFa6U3PzpQjJYq6w2Qq4zeR/uRuU7N40r7nmkps+tAN1oSbSWROjhNWoPK6MA9WtOcEe1LZ/XRt2jff32/1HRwJteAqwE19fXwJO1jN3v2bNMPUWsJNPDq56jXNa2byrS+N9ndR3qfUVqvZ2XQPu2Hqf1hFy5caAZrAgB/F4yxXlnxTPura4G6xg1Xqafwi4uLM4O2Pf/8826JeWbvk1G813uH1IPa6fvWqVPH/Kz99jX2WAPCZiXeW9ta52cNnKjHmVZhiX6m1gCH2pJBW64R6/9LW1xoSwi9B7KunY7toHR6Ri20SavAB/ZAwo+goKWaSkuQM6KlnlqKrcuECRPM4CdaAq5BRGs/0wvIOaWj2Kf+46zN4VxH2NWS67RGStcSc22aZcnOsemAeToHrzZ7dK0J0ZHvrfWeoPvROZE1aLmWKHv6fbLjRj/D1J+Z0pYCWrhhzULw0UcfmRsqHRTJos0aszPivSf2cSO0Rkc/t7RqGADAHxHr3RHrc85usV4Tev0updXKQFtraEEDs/LYF206YHval0ynfNE/cp07d053uzNnzlz3mlUyrcmP0n7NylN/FHUaPNe+hvqHX0tadfRfi/an06ZYrn3Mtel96il9snNsOrKt1hq89dZ
bbq9rUy8Nkvr+2g9SF6XN363nuvz73//O0vnp+2ifOa09cR0NXkcU1v3oqL65Ta/TjXx+CQkJbv3q9HPQvqDaT9AqYdfH1LXoes6p+xhmxBP7yAq9FtrsMbX33nvPPKbVYgIA/A2xPmexXqUX63Wxrklm70Os9+9Yr6P56wwNrou2LlBvvPGGmS0B9kUNP2xl1apVpkRZA40O+qI3ADoYjJYuf/LJJ2YAufToVDfaHE0HlNPtdeocnYZFm8dZTcI1IGuztZkzZ5rScg0oOmBKWiWmWaFN2XTfOviPHq82UdemiK7TCWlTe7050Cbs//d//2fmrNU+aannT83OsekUO02bNjU1Gtq0S5tsa1NGDWbarEz3ZTUB1H5s2rRMB+hx7b+ozeIzm8NeB9bRGwhtZqj95rV5np6LTn+k55pRP0tv0UF4dMobnQpQr7U2+7PmJc4K7Z+otUeuU/Uo12aOOuWT1jRpibk2KdQbB61l0S4MWeWJfWSFNnfUc9FuH/pZ682mFujowI2a7OuUUwDgT4j1nov1SuN9WrFeaSsIPXarOX1aiPX+H+u1oCI1q0BEC2Qo3Lc3En7Yig58onQwHQ2wOqerBhsNspkFHG3SpAHx/fffN/2aNKHVP4L6x90aFEUHutP54YcMGSK9evUyNxva9yqnNwE6X7A2g9O56bX0XwOrBhXXeVc14GhTL212qAFa/yhrqb81h60lO8emTe70pkivl5bI63YaoLUvn7Vf1z6Qev4Z9YlMj/ZX04TyxRdfNMeWlJRk5ufV99MbA1/Qc9ZmbTpokV5z/YyzcxOg22s/O/1eaBM5DdB6I+TaNHPy5Mmm1F5LzLVpng4QpQE8s2amrjyxj6zQ/yN6Q6g3gFrjpDUNehOo12nQoEHZGpgKAHIDsd5zsV5Z8T2tWJ+VpvHEev+P9QhuITo3n68PAgACgd749OnT57rmkQAAwB6I9bAb+vADAAAAAGBDNOkHEPR0sKHMmitaTT0BAEDgIdYjWJHwAwh6pUuXznC9TpmT0YBFAADAvxHrEaxI+AEEPR3dOSNlypQxjwx5AgBAYCLWI1gxaB8AAAAAADbEoH0AAAAAANgQTfqzICUlRY4fP27mds3KfKQAAHibNtDT+aW1GarOt40bR7wHANgt1pPwZ4EG/+joaF8fBgAA1zl27JiULVvW14dhC8R7AIDdYj0JfxZoSb91ocPDw319OAAASFJSkklOrRiFG0e8BwDYLdaT8GeB1axPgz83AAAAf0LTc88h3gMA7Bbr6fQHAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA2R8AMAAAAAYEMk/AAAAAAA2BAJPwAAAAAANkTCDwAAAACADZHwAwAAAABgQ6G+PgAAwad7fGK662Z1a5CrxwIAgDfjmiK2AfAVv6/h//nnn+Xxxx+XkiVLSsGCBaVmzZqyY8cO53qHwyHDhw+X0qVLm/UtWrSQgwcPuu3jzJkz0rlzZwkPD5dixYpJ9+7d5fz58z44GwAAAAAAcodfJ/y///673HPPPZIvXz5ZtWqV7N+/X958800pXry4c5vx48fLlClTZObMmbJt2zYpXLiwxMbGyqVLl5zbaLK/b98+WbdunaxYsUK2bNkiPXv29NFZAQAAAAAQ5E36X3vtNYmOjpbZs2c7X6tQoYJb7f6kSZNk6NCh0q5dO/Pa3LlzJTIyUpYtWyaPPvqofPvtt7J69WpJTEyU+vXrm22mTp0qrVu3ljfeeEPKlCnjgzMDAAAAACCIE/5PPvnE1Nb/5S9/kc2bN8stt9wizzzzjPTo0cOsP3LkiJw8edI047dERERIw4YNJSEhwST8+qjN+K1kX+n2efLkMS0COnTocN37Jicnm8WSlJTk9XMFAgl9FQEAAAD/59dN+n/44QeZMWOGVKpUSdasWSO9e/eWv//97zJnzhyzXpN9pTX6rvS5tU4fS5Uq5bY+NDRUSpQo4dwmtbF
jx5qCA2vRVgYAAAAAAAQSv074U1JS5M4775RXX31V6tata/rda+2+9tf3piFDhsi5c+ecy7Fjx7z6fgAAAAAABFWTfh15v1q1am6vVa1aVf71r3+Zn6OioszjqVOnzLYWfV6nTh3nNqdPn3bbx9WrV83I/dbvp1agQAGzAMh9dBcAAAAAgqCGX0foP3DggNtr33//vZQvX945gJ8m7evXr3frb69982NiYsxzfTx79qzs3LnTuc2GDRtM6wHt6w8AAAAAgB35dQ3/gAED5O677zZN+v/v//5Ptm/fLu+8845ZVEhIiPTv319eeeUV089fCwCGDRtmRt5v3769s0XAgw8+6OwKcOXKFenbt68Z0I8R+gEAAAAAduXXCX+DBg1k6dKlpk/9qFGjTEKv0/B17tzZuc3gwYPlwoULpn+/1uQ3btzYTMMXFhbm3Gb+/PkmyW/evLkZnb9Tp04yZcoUH50VAAAAAADeF+LQyeyRIe0moKP16wB+4eHhvj4cwO/72XsTffiB/yI2eR7XFDnF+DMA/DUu+XUffgAAAAAAkDMk/AAAAAAA2BAJPwAAAAAANuTXg/YBQGr0kwQAAACyhhp+AAAAAABsiIQfAAAAAAAbIuEHAAAAAMCGSPgBAAAAALAhEn4AAAAAAGyIUfoB5Gg0fAAAAAD+jRp+AAAAAABsiIQfAAAAAAAbIuEHAAAAAMCGSPgBAAAAALAhEn4AAAAAAGyIhB8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4AQAAAACwIRJ+AAAAAABsiIQfAADkmnHjxklISIj079/f+dqlS5ekT58+UrJkSSlSpIh06tRJTp065fZ7R48elTZt2kihQoWkVKlSMmjQILl69arbNps2bZI777xTChQoIBUrVpT4+PhcOy8AAPwRCT8AAMgViYmJ8vbbb0utWrXcXh8wYIAsX75cFi9eLJs3b5bjx49Lx44dneuvXbtmkv3Lly/Ll19+KXPmzDHJ/PDhw53bHDlyxGzTtGlT2b17tylQeOqpp2TNmjW5eo4AAPgTEn4AAOB158+fl86dO8u7774rxYsXd75+7tw5mTVrlkyYMEGaNWsm9erVk9mzZ5vEfuvWrWabtWvXyv79+2XevHlSp04dadWqlYwePVqmTZtmCgHUzJkzpUKFCvLmm29K1apVpW/fvvLII4/IxIkTfXbOAAD4Ggk/AADwOm2yrzXwLVq0cHt9586dcuXKFbfXq1SpIuXKlZOEhATzXB9r1qwpkZGRzm1iY2MlKSlJ9u3b59wm9b51G2sfaUlOTjb7cF0AALCTUF8fAAAAsLeFCxfKrl27TJP+1E6ePCn58+eXYsWKub2uyb2us7ZxTfat9da6jLbRJP7PP/+UggULXvfeY8eOlZEjR3rgDAEA8E/U8AMAAK85duyYPPvsszJ//nwJCwsTfzJkyBDTpcBa9FgBALATEn4AAOA12mT/9OnTZvT80NBQs+jAfFOmTDE/ay289sM/e/as2+/pKP1RUVHmZ31MPWq/9TyzbcLDw9Os3Vc6mr+ud10AALATmvQDsJXu8dc3GXY1q1uDXDsWACLNmzeXvXv3ur32xBNPmH76L7zwgkRHR0u+fPlk/fr1Zjo+deDAATMNX0xMjHmuj2PGjDEFBzoln1q3bp1J0KtVq+bcZuXKlW7vo9tY+wAAIBj5dQ3/yy+/bObqdV30BsHT8/YCAADvKFq0qNSoUcNtKVy4sInd+nNERIR0795dBg4cKBs3bjQtArRAQBP1Ro0amX20bNnSJPZdunSRr7/+2ky1N3ToUHMPoLX0qlevXvLDDz/I4MGD5bvvvpPp06fLokWLzJR/AAAEK7+v4a9evbp89tlnzufa/M+iQfzTTz818/bqDYNOwaPz9n7xxRdu8/ZqMz+d3ufEiRPStWtXU5Pw6quv+uR8AACAO506L0+ePKbgXkfO19H1NWG
35M2bV1asWCG9e/c2BQFaYBAXFyejRo1ybqNT8uk9gd4bTJ48WcqWLSvvvfee2RcAAMEqxOFwOMSPa/iXLVsmu3fvvm6dDq5z8803y4IFC8w8u0pL9HXuXZ2CR2sFVq1aJQ899JAcP37cOXKvztOrTQh/+eUXMypwVugIv1qgoO9J/z4Ei8yaxgcqmvTDLohNnsc1RU7RnQyAv8Ylv6/hP3jwoJQpU8aM7Kul+jqFjs7Nm9m8vZrwpzdvr9YQ6Ly9devWTfM9tXZBFwvz8sKO7JrQAwAAAAiAPvwNGzaU+Ph4Wb16tcyYMUOOHDki9957r/zxxx8em7c3LVqooCUp1qIDCgEAAAAAEEj8uoa/VatWzp9r1aplCgDKly9vBuFJb4odT83Lq4MHudbwk/QDAAAAAAKJX9fwp6a1+XfccYccOnTIDMTniXl708K8vAAAAACAQBdQCf/58+fl8OHDUrp0aalXr55z3l5LWvP26ty/Om+vJfW8vQAAAAAA2JFfN+l//vnnpW3btqYZv460P2LECDM1z2OPPeY2b2+JEiVMEt+vX7905+0dP3686befet5eAAAAAADsyK8T/p9++skk97/99puZgq9x48aydetW87On5u0FAAAAAMCO/DrhX7hwYYbrdaq+adOmmSU92jpg5cqVXjg6AAAAAAD8V0D14QcAAAAAAFlDwg8AAAAAgA35dZN+ADnXPT7R14cAAAAAwIeo4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4AQAAAACwIRJ+AAAAAABsKNTXBwAAual7fGK662Z1a5CrxwIAAAB4EzX8AAAAAADYEAk/AAAAAAA2RJN+AMhCc39Fk38AAAAEEmr4AQAAAACwIRJ+AAAAAABsiIQfAAAAAAAbIuEHAAAAAMCGSPgBAAAAALAhEn4AAAAAAGyIhB8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsKFQXx8AAACAHXWPT8xw/axuDXLtWAAAwYkafgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4AQAAAACwIRJ+AAAAAABsiIQfAAAAAAAbIuEHAAAAAMCGSPgBAAAAALChgEr4x40bJyEhIdK/f3/na5cuXZI+ffpIyZIlpUiRItKpUyc5deqU2+8dPXpU2rRpI4UKFZJSpUrJoEGD5OrVqz44AwAAAAAAckfAJPyJiYny9ttvS61atdxeHzBggCxfvlwWL14smzdvluPHj0vHjh2d669du2aS/cuXL8uXX34pc+bMkfj4eBk+fLgPzgIAAAAAgNwREAn/+fPnpXPnzvLuu+9K8eLFna+fO3dOZs2aJRMmTJBmzZpJvXr1ZPbs2Sax37p1q9lm7dq1sn//fpk3b57UqVNHWrVqJaNHj5Zp06aZQoC0JCcnS1JSktsCAAAAAEAgCYiEX5vsay19ixYt3F7fuXOnXLlyxe31KlWqSLly5SQhIcE818eaNWtKZGSkc5vY2FiTxO/bty/N9xs7dqxEREQ4l+joaK+dGwAAAAAAQZnwL1y4UHbt2mWS8NROnjwp+fPnl2LFirm9rsm9rrO2cU32rfXWurQMGTLEtB6wlmPHjnnwjADP6R6fmO4CAP5gxowZpjteeHi4WWJiYmTVqlUeH4tn06ZNcuedd0qBAgWkYsWKpvseAADBzq8Tfk20n332WZk/f76EhYXl2vvqzYJ1Y2ItAAAg+8qWLWsG3dVWeTt27DBd8Nq1a+dsZeeJsXiOHDlitmnatKns3r3bDO771FNPyZo1a3xyzgAA+Au/Tvj15uD06dOmxD40NNQsejMwZcoU87PW1OsNwNmzZ91+T2sGoqKizM/6mLqmwHpubQMAALyjbdu20rp1a6lUqZLccccdMmbMGFOTr2PteGosnpkzZ0qFChXkzTfflKpVq0rfvn3lkUcekYkTJ/r47AEA8C2/TvibN28ue/fuNaX11lK/fn0zgJ/1c75
8+WT9+vXO3zlw4IBp+qdNBpU+6j604MCybt06U2tfrVo1n5wXAADBSGvrtavehQsXTHz21Fg8uk3qcX50G2sf6WGQXgCA3YWKHytatKjUqFHD7bXChQubfn7W6927d5eBAwdKiRIlTBLfr18/cxPRqFEjs75ly5Ymse/SpYuMHz/e9NsfOnSo6S+oTfcBAIB3acG7xmbtr6+1+0uXLjWxWQvvPTEWT3rbaAL/559/SsGCBdM8Lh0faOTIkR49VwAA/Ilf1/BnhTbXe+ihh8wgP/fdd59ppr9kyRLn+rx588qKFSvMo95sPP7449K1a1cZNWqUT48bAIBgUblyZZPcb9u2TXr37i1xcXGmmb6vMUgvAMDu/LqGPy06Cq8rHcxP+/Hpkp7y5cvLypUrc+HoAABAalqLryPnK+2nn5iYKJMnT5a//vWvzrF4XGv5U4/Fs3379gzH4klvvB5t+Zde7b7Sln609gMA2FnAJfwA4CuZTXc4q1uDXDsWIJClpKSY/vOa/Ftj8WhLvfTG4tGB/nQsHp2SL62xeHSb1AX7uo21DwAAghUJPwAA8GqzeR1ZXwfi++OPP2TBggWmtZ5OmRcREeGRsXh69eolb731lgwePFiefPJJ2bBhgyxatEg+/fRTH589AAC+RcIPAAC8RmvmdeycEydOmAS/Vq1aJtl/4IEHnGPx5MmTx9Twa62/jq4/ffr068bi0b7/WhCgg/fqGACuY/HolHya3A8YMMB0FShbtqy89957Zl8AAAQzryX8P/zwg9x2223e2j0AAPAyT8TyWbNmZbjeU2PxNGnSRL766qscHycAAHbktVH6dXCepk2byrx588w0PAAAILAQywEACGxeS/h37dplmu1pvzwdPffpp5++bpRdAADgv4jlAAAENq8l/HXq1DH96I4fPy7vv/++6bvXuHFjqVGjhkyYMEF++eUXb701AADwAGI5AACBzWsJvyU0NFQ6duwoixcvltdee00OHTokzz//vERHRzsH8QEAAP6LWA4AQGDyesK/Y8cOeeaZZ6R06dKmNkBvEA4fPmzmx9Uag3bt2nn7EAAAwA0glgMAEJi8Nkq/3hDMnj1bDhw4IK1bt5a5c+eaR516x5pCJz4+Xm699VZvHQIA5Kru8YnprpvVrUGuHgvgCcRyAAACm9cS/hkzZsiTTz4p3bp1MzUCaSlVqlSm0/UAAADfIJYDABDYvJbwazO/cuXKOWsBLA6HQ44dO2bW5c+fX+Li4rx1CICta4wBwNuI5QAABDav9eG//fbb5ddff73u9TNnzpgmgAAAwL8RywEACGxeS/i19D8t58+fl7CwMG+9LQAA8BBiOQAAgc3jTfoHDhxoHkNCQmT48OFSqFAh57pr167Jtm3bzLy+AADAPxHLAQCwB48n/F999ZWzVmDv3r2mb59Ff65du7aZzgcAAPgnYjkAAPbg8YR/48aN5vGJJ56QyZMnS3h4uKffAgAAeBGxHAAAe/DaKP06by8AAAhcxHIAAAKbRxP+jh07Snx8vKkJ0J8zsmTJEk++NQAA8ABiOQAA9uHRhD8iIsIM8GP9DAAAAguxHAAA+wj1VtM/mgECABB4iOUAANhHHm/t+M8//5SLFy86n//4448yadIkWbt2rbfeEgAAeBCxHACAwOa1hL9du3Yyd+5c8/PZs2flrrvukjfffNO8PmPGDG+9LQAA8BBiOQAAgc1rCf+uXbvk3nvvNT9/9NFHEhUVZWoG9MZhypQp3npbAADgIcRyAAACm9cSfm0CWLRoUfOzNv3TkX7z5MkjjRo1MjcLAADAvxHLAQAIbF5L+CtWrCjLli2TY8eOyZo1a6Rly5bm9dOnT5upfgAAgH8jlgMAENi8lvAPHz5cnn/+ebn11lulYcOGEhMT46whqFu3rrfeFgAAeAixHACAwObRaflcPfLII9K4cWM5ceKE1K5d2/l68+bNpUOHDt56WwAA4CHEcgAAApvXEn6lg/vo4kpH+AU
AAIGBWA4AQODyWsJ/4cIFGTdunKxfv9709UtJSXFb/8MPP3jrrQEAgAcQywEACGxeS/ifeuop2bx5s3Tp0kVKly4tISEh3norAADgBcRyAAACm9cS/lWrVsmnn34q99xzT473MWPGDLP85z//Mc+rV69uBhBq1aqVeX7p0iV57rnnZOHChZKcnCyxsbEyffp0iYyMdO7j6NGj0rt3b9m4caMUKVJE4uLiZOzYsRIa6tXeDAAABDxPxHIAAGDDUfqLFy8uJUqUuKF9lC1b1jQl3Llzp+zYsUOaNWsm7dq1k3379pn1AwYMkOXLl8vixYtNDcTx48fNHMGWa9euSZs2beTy5cvy5Zdfypw5cyQ+Pt4UGgAAAO/HcgAAYMOEf/To0SaxvnjxYo730bZtW2ndurVUqlRJ7rjjDhkzZoyppd+6daucO3dOZs2aJRMmTDAFAfXq1ZPZs2ebxF7XW9MG7d+/X+bNmyd16tQxLQP0uKZNm2YKAQAAgHdjOQAA8B2vtWt/88035fDhw6Z5vc7fmy9fPrf1u3btytb+tLZea/J1ACGdB1hr/a9cuSItWrRwblOlShUpV66cJCQkSKNGjcxjzZo13Zr4a7N/beKvrQTSm0NYuwfoYklKSsrWsQJZ1T0+0deHAAC5FssBAIBNEv727dt7ZD979+41Cb7219fa/aVLl0q1atVk9+7dkj9/filWrJjb9npTcvLkSfOzProm+9Z6a116tI//yJEjPXL8AAAEKk/FcgAAYLOEf8SIER7ZT+XKlU1yr034P/roIzPonvbX96YhQ4bIwIED3Wr4o6OjvfqeAAD4G0/FcgAAYLM+/Ors2bPy3nvvmQT6zJkzzuZ/P//8c5b3obX4FStWNH30tea9du3aMnnyZImKijL98PU9XJ06dcqsU/qoz1Ovt9alp0CBAhIeHu62AAAQjDwRywEAgM0S/j179piB9l577TV54403nIn5kiVLzE1DTqWkpJj+9VoAoH0J169f71x34MABMw2fdgFQ+qhdAk6fPu3cZt26dSaB124BAAAg92M5AAAI8IRfm8R369ZNDh48KGFhYc7XddT9LVu2ZGkfejOh2/7nP/8xibs+37Rpk3Tu3FkiIiKke/fu5n02btxoBvF74oknTJKvA/apli1bmsS+S5cu8vXXX8uaNWtk6NCh0qdPH1OLDwAAvBvLAQCADfvwJyYmyttvv33d67fcckuGA+a50pr5rl27yokTJ0yCX6tWLZO0P/DAA2b9xIkTJU+ePNKpUydT668j8E+fPt35+3nz5pUVK1aYUfm1IKBw4cJmDIBRo0Z58EwBALAnT8RyAABgw4Rfa9DTms7u+++/l5tvvjlL+5g1a1aG67W2Ydq0aWZJT/ny5WXlypVZej8A8NcpGmd1a5BrxwJ4MpYDAAAbNul/+OGHTU36lStXzPOQkBDTv/6FF14wNfIAAMC/EcsBAAhsXkv433zzTTl//rypAfjzzz/l/vvvN6PtFy1aVMaMGeOttwUAAB5CLAcAILB5rUm/9rnXEfG/+OILM2Ce3jDceeed0qJFC2+9JQAA8CBiOQAAgc0rCb9OnRcfH2+m7dER9rUJYIUKFSQqKkocDod5DgAA/BexHACAwOfxJv16E6B9/p566in5+eefpWbNmlK9enX58ccfzdQ+HTp08PRbAgAADyKWAwBgDx6v4dfaAJ2bd/369dK0aVO3dRs2bJD27dvL3LlzzXR7AADA/xDLAQCwB4/X8H/wwQfy0ksvXXeDoJo1ayYvvviizJ8/39NvCwAA/DCWjx07Vho0aGAG+itVqpQpLDhw4IDbNpcuXZI+ffpIyZIlpUiRImYGgFOnTrlto7MDtGnTRgoVKmT2M2jQILl69arbNps2bTJjDOh0gjq4oBZcAAAQzDye8O/Zs0cefPDBdNe3atXKDPwDAAD8kydj+ebNm00yv3XrVjMAoE7x17JlS7lw4YJzmwEDBsjy5ctl8eLFZvvjx49
Lx44dneuvXbtmkv3Lly/Ll19+KXPmzDHJ/PDhw53bHDlyxGyjhRS7d++W/v37my4Ja9asyfF1AAAg0Hm8Sf+ZM2ckMjIy3fW67vfff/f02wIAAD+M5atXr3Z7rom61tDv3LlT7rvvPjl37pzMmjVLFixYYFoPqNmzZ0vVqlVNIUGjRo1k7dq1sn//fvnss8/Me9epU0dGjx4tL7zwgrz88suSP39+mTlzphlUUKcSVPr7n3/+uUycOFFiY2Nv6HoAABCoPF7Dr6XwoaHplyPkzZv3uiZ4AADAf3gzlmuCr0qUKGEeNfHXWn/Xqf6qVKki5cqVk4SEBPNcH3XgQNdCCE3ik5KSZN++fc5tUk8XqNtY+0hLcnKy2YfrAgCAnYR6Y2RfHcFX+8+lF1wBAID/8lYs16n+tKn9PffcIzVq1DCvnTx50tTQFytWzG1bTe51nbVN6hYH1vPMttEk/s8//5SCBQumOb7AyJEjc3QuAAAEZcIfFxeX6TaM6gsAgP/yVizXvvzffPONaWrvD4YMGSIDBw50PtfCgejoaJ8eEwAAfp3wa787AAAQuLwRy/v27SsrVqww0/2VLVvW+XpUVJQZjO/s2bNutfw6Sr+us7bZvn272/6sUfxdt0k9sr8+Dw8PT7N2X2kLhvRaMQAAYAce78MPAADg2j1Ak/2lS5fKhg0bzMB6rurVqyf58uWT9evXO1/Taft0Gr6YmBjzXB/37t0rp0+fdm6jI/5rMl+tWjXnNq77sLax9gEAQDDyeA0/AACAazN+HYH/448/lqJFizr73EdERJiad33s3r27aVqvA/lpEt+vXz+TqOsI/Uqn8dPEvkuXLjJ+/Hizj6FDh5p9WzX0vXr1krfeeksGDx4sTz75pClcWLRokXz66ac+PX8AAHyJGn4AAOA1M2bMMCPzN2nSREqXLu1cPvzwQ+c2OnXeQw89JJ06dTJT9Wnz/CVLlrjNCqDdAfRRCwIef/xxM4bAqFGjnNtoywFN7rVWv3bt2mZ6vvfee48p+QAAQY0afgAA4NUm/ZkJCwuTadOmmSU95cuXl5UrV2a4Hy1U+Oqrr3J0nAAA2BE1/AAAAAAA2BAJPwAAAAAANkTCDwAAAACADZHwAwAAAABgQyT8AAAAAADYEAk/AAAAAAA2RMIPAAAAAIANkfADAAAAAGBDob4+ACDQdY9PzHD9rG4Ncu1YAAAA4J/3hdwTwheo4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhvw64R87dqw0aNBAihYtKqVKlZL27dvLgQMH3La5dOmS9OnTR0qWLClFihSRTp06yalTp9y2OXr0qLRp00YKFSpk9jNo0CC5evVqLp8NAAAAAAC5x68T/s2bN5tkfuvWrbJu3Tq5cuWKtGzZUi5cuODcZsCAAbJ8+XJZvHix2f748ePSsWNH5/pr166ZZP/y5cvy5Zdfypw5cyQ+Pl6GDx/uo7MCAAAAAMD7QsWPrV692u25JupaQ79z506577775Ny5czJr1ixZsGCBNGvWzGwze/ZsqVq1qikkaNSokaxdu1b2798vn332mURGRkqdOnVk9OjR8sILL8jLL78s+fPn99HZAUDWMa8vAAAAbFXDn5om+KpEiRLmURN/rfVv0aKFc5sqVapIuXLlJCEhwTzXx5o1a5pk3xIbGytJSUmyb9++NN8nOTnZrHddAAAAAAAIJAGT8KekpEj//v3lnnvukRo1apjXTp48aWroixUr5ratJve6ztrGNdm31lvr0hs7ICIiwrlER0d76awAAAAAAAjyhF/78n/zzTeycOFCr7/XkCFDTGsCazl27JjX3xMAAAAAgKDpw2/p27evrFixQrZs2SJly5Z1vh4VFWUG4zt79qxbLb+O0q/rrG22b9/utj9rFH9rm9QKFChgFgAAAAAAApVf1/A7HA6T7C9dulQ2bNggFSpUcFtfr149yZcvn6xfv975mk7bp9P
wxcTEmOf6uHfvXjl9+rRzGx3xPzw8XKpVq5aLZwMAAAAAQO4J9fdm/DoC/8cffyxFixZ19rnXfvUFCxY0j927d5eBAweagfw0ie/Xr59J8nWEfqXT+Gli36VLFxk/frzZx9ChQ82+qcUHAAAAANiVXyf8M2bMMI9NmjRxe12n3uvWrZv5eeLEiZInTx7p1KmTGV1fR+CfPn26c9u8efOa7gC9e/c2BQGFCxeWuLg4GTVqVC6fDQAAAAAAuSfU35v0ZyYsLEymTZtmlvSUL19eVq5c6eGjAwAAAADAf/l1H34AAAAAAJAzJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA359aB9gD/oHp/o60MAAAAAgGyjhh8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIhB+wDA5gNLzurWINeOBQAAAP6DGn4AAAAAAGyIhB8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAG2JaPsDHU6YBAAAAgDdQww8AAAAAgA1Rww8AuYCWHgAAAMht1PADAACv2bJli7Rt21bKlCkjISEhsmzZMrf1DodDhg8fLqVLl5aCBQtKixYt5ODBg27bnDlzRjp37izh4eFSrFgx6d69u5w/f95tmz179si9994rYWFhEh0dLePHj8+V8wMAwJ+R8AMAAK+5cOGC1K5dW6ZNm5bmek3Mp0yZIjNnzpRt27ZJ4cKFJTY2Vi5duuTcRpP9ffv2ybp162TFihWmEKFnz57O9UlJSdKyZUspX7687Ny5U15//XV5+eWX5Z133smVcwQAwF/RpB8AAHhNq1atzJIWrd2fNGmSDB06VNq1a2demzt3rkRGRpqWAI8++qh8++23snr1aklMTJT69eubbaZOnSqtW7eWN954w7QcmD9/vly+fFnef/99yZ8/v1SvXl12794tEyZMcCsYAAAg2FDDDwAAfOLIkSNy8uRJ04zfEhERIQ0bNpSEhATzXB+1Gb+V7CvdPk+ePKZFgLXNfffdZ5J9i7YSOHDggPz+++/pvn9ycrJpHeC6AABgJyT8AADAJzTZV1qj70qfW+v0sVSpUm7rQ0NDpUSJEm7bpLUP1/dIy9ixY00Bg7Vo338AAOyEJv0AI6gDQFAaMmSIDBw40Plca/hJ+gEAdkINPwAA8ImoqCjzeOrUKbfX9bm1Th9Pnz7ttv7q1atm5H7XbdLah+t7pKVAgQJm5H/XBQAAO6GGHwCCvAXLrG4Ncu1YAFcVKlQwCfn69eulTp06zlp27Zvfu3dv8zwmJkbOnj1rRt+vV6+eeW3Dhg2SkpJi+vpb2/zjH/+QK1euSL58+cxrOqJ/5cqVpXjx4j47PwAAfI0afgAA4DXnz583I+brYg3Upz8fPXpUQkJCpH///vLKK6/IJ598Inv37pWuXbuakffbt29vtq9atao8+OCD0qNHD9m+fbt88cUX0rdvXzOCv26n/va3v5kB+7p3726m7/vwww9l8uTJbs31AQAIRtTwAwAAr9mxY4c0bdrU+dxKwuPi4iQ+Pl4GDx4sFy5cMNPnaU1+48aNzTR8YWFhzt/Rafc0yW/evLkZnb9Tp04yZcoU53odcG/t2rXSp08f0wrgpptukuHDhzMlHwAg6JHwAwAAr2nSpIk4HI5012st/6hRo8ySHh2Rf8GCBRm+T61ateTf//73DR0rAAB2Q5N+AAAAAABsyO8T/i1btkjbtm1NPz2tBVi2bJnbeq010GZ7pUuXloIFC0qLFi3k4MGDbtvoSL6dO3c2o+8WK1bM9PHTPoUAAAAAANiV3yf82q+vdu3aMm3atDTXjx8/3vTjmzlzphnVt3DhwhIbGyuXLl1ybqPJvg7ioyP2rlixwhQi0K8PAAAAAGBnft+Hv1WrVmZJi9buT5o0SYYOHSrt2rUzr82dO1ciIyNNSwAdwffbb781g/8kJiZK/fr1zTZTp06V1q1byxtvvOEc4RcAAAA
AADvx+xr+jOjUPidPnjTN+F1H6tV5eRMSEsxzfdRm/Fayr3R7HeVXWwSkJTk52cwD7LoAAAAAABBIAjrh12RfaY2+K31urdPHUqVKua0PDQ01I/5a26Q2duxYU3BgLdHR0V47BwAAAAAAgrJJvy8MGTLEOU+w0hp+kv7A1j0+0deHAAAAAAC5KqBr+KOioszjqVOn3F7X59Y6fTx9+rTb+qtXr5qR+61tUitQoIAZ0d91AQAAAAAgkAR0wl+hQgWTtK9fv96tNl775sfExJjn+nj27FnZuXOnc5sNGzZISkqK6esPAAAAAIAd+X2T/vPnz8uhQ4fcBurbvXu36YNfrlw56d+/v7zyyitSqVIlUwAwbNgwM/J++/btzfZVq1aVBx98UHr06GGm7rty5Yr07dvXjODPCP0AAAAAALvy+4R/x44d0rRpU+dzq299XFycxMfHy+DBg+XChQvSs2dPU5PfuHFjMw1fWFiY83fmz59vkvzmzZub0fk7deokU6ZM8cn5AAAAAACQG/w+4W/SpIk4HI5014eEhMioUaPMkh5tDbBgwQIvHSEAAAAAAP7H7xN+AIDvZrGY1a1Brh4LAAAAPCegB+0DAAAAAABpo4Yftq+hBAAAAIBgRA0/AAAAAAA2RMIPAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADbEKP0AgBzPgDGrW4NcOxYAAABkDzX8AAAAAADYEDX8sEUtIwAAAADAHTX8AAAAAADYEAk/AAAAAAA2RJN+AECOMagfAACA/6KGHwAAAAAAG6KGH36DgfkAAAAAwHOo4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAG2LQPgCATwbjZMo+AAAA76KGHwAAAAAAG6KGH7mGafcAAAAAIPeQ8AMAAAAAYMNuizTpBwAAAADAhqjhh8fQZB8AAAAA/Ac1/AAAAAAA2BA1/ACAgGw15E/94wAAAPwRCT8AwCfoBgQAAOBdJPzIFm7QAQAAACAwkPDDDQk9AAAAANgDg/YBAAAAAGBDQVXDP23aNHn99dfl5MmTUrt2bZk6darcddddvj4sAICHMeBf8CLWAwAQhAn/hx9+KAMHDpSZM2dKw4YNZdKkSRIbGysHDhyQUqVKSbCgyT4Au7iRv2cUCNgTsR4AgCBN+CdMmCA9evSQJ554wjzXm4FPP/1U3n//fXnxxRfdtk1OTjaL5dy5c+YxKSlJ/EGf+TszXD+tc710113+87wXjggA7KXLjI05/jubW6yY5HA4fH0oARnrcyPeZxZz/eW+AjeOzxpZ+S7wPbCXy7nwWXsk1juCQHJysiNv3ryOpUuXur3etWtXx8MPP3zd9iNGjNArysLCwsLC4vfLsWPHcjGi2ifWK+I9CwsLC4vYPNYHRQ3/r7/+KteuXZPIyEi31/X5d999d932Q4YMMU0CLSkpKXLmzBkpWbKkhISE5KhkJjo6Wo4dOybh4eE5PIvgxLXLOa5dznHtbgzXL3eunZb2//HHH1KmTJlcOz47xXpvxHtXwf7/gPPn/Dl/zp/zP3bD5++JWB8UCX92FShQwCyuihUrdsP71Q88GL/0nsC1yzmuXc5x7W4M18/71y4iIiJXjseuvBXvXQX7/wPOn/Pn/Dn/YBXuofO/0VgfFNPy3XTTTZI3b145deqU2+v6PCoqymfHBQAAPINYDwBAkCb8+fPnl3r16sn69evdmu3p85iYGJ8eGwAAuHHEegAAgrhJv/bRi4uLk/r165v5eHWqngsXLjhH8vUmbS44YsSI65oNInNcu5zj2uUc1+7GcP1yjmsXuLE+tWD/LDl/zp/z5/w5/wLiD0J05D4JEm+99Za8/vrrcvLkSalTp45MmTLFzNMLAADsgVgPAECQJvwAAAAAAASLoOjDDwAAAABAsCHhBwAAAADAhkj4AQAAAACwIRJ+AAAAAABsiITfy6ZNmya33nqrhIWFmVGCt2/f7utD8jtjx46
VBg0aSNGiRaVUqVLSvn17OXDggNs2ly5dkj59+kjJkiWlSJEi0qlTJzl16pTPjtlfjRs3TkJCQqR///7O17h2Gfv555/l8ccfN9enYMGCUrNmTdmxY4dzvY5rOnz4cCldurRZ36JFCzl48KAEu2vXrsmwYcOkQoUK5rrcfvvtMnr0aHO9LFy7/9qyZYu0bdtWypQpY/5/Llu2zG19Vq7TmTNnpHPnzhIeHi7FihWT7t27y/nz53P5TJDVzzQtmzZtkjvvvNNM01SxYkWJj4+XYLoGev66XepFZ1Ow431LWhYvXixVqlQx94Qaa1auXCmBKCfnr9/31J+9XodANGPGDKlVq5b5e6xLTEyMrFq1Kig++5ycv50++6zee/vbd4CE34s+/PBDMyewzsO4a9cuqV27tsTGxsrp06d9fWh+ZfPmzSYh3bp1q6xbt06uXLkiLVu2NHMnWwYMGCDLly83/1l0++PHj0vHjh19etz+JjExUd5++23zR9gV1y59v//+u9xzzz2SL18+E6z2798vb775phQvXty5zfjx4820XjNnzpRt27ZJ4cKFzf9jLUgJZq+99poJ+joF2rfffmue67WaOnWqcxuu3X/p3zL9+68FwGnJynXSZH/fvn3mb+SKFStMstWzZ89cPAtk5zNN7ciRI9KmTRtp2rSp7N6929wYPvXUU7JmzRoJlmtg0cTwxIkTzkUTRjvet6T25ZdfymOPPWYK67766iuTJOvyzTffSDCcv9Lk0PWz//HHHyUQlS1b1iR5O3fuNBUEzZo1k3bt2pm/0Xb/7HNy/nb67LN67+133wGdlg/ecddddzn69OnjfH7t2jVHmTJlHGPHjvXpcfm706dPaxWhY/Pmzeb52bNnHfny5XMsXrzYuc23335rtklISPDhkfqPP/74w1GpUiXHunXrHPfff7/j2WefNa9z7TL2wgsvOBo3bpzu+pSUFEdUVJTj9ddfd76m17RAgQKODz74wBHM2rRp43jyySfdXuvYsaOjc+fO5meuXdr0/97SpUudz7Nynfbv329+LzEx0bnNqlWrHCEhIY6ff/45l88AmX2maRk8eLCjevXqbq/99a9/dcTGxjqC5Rps3LjRbPf777877H7fkpb/+7//M383XTVs2NDx9NNPO4Lh/GfPnu2IiIhw2FXx4sUd7733XtB99lk5f7t+9n+kc+/tj98Bavi95PLly6bkS5tmWvLkyWOeJyQk+PTY/N25c+fMY4kSJcyjXkctPXa9ltokply5clzL/09L2rX2yPUaKa5dxj755BOpX7++/OUvfzG1THXr1pV3333XrVZOm5u6Xr+IiAjTPSfYr9/dd98t69evl++//948//rrr+Xzzz+XVq1amedcu6zJynXSR23Gr99Vi26vMUVbBMD/6WeY+u+ztuIIxv8LderUMd1XHnjgAfniiy/EjvctwfYdyMr5K+2GVL58eYmOjs60RjiQurctXLjQtG7Qpu3B9tln5fzt+tn3Sefe2x+/A6G58i5B6NdffzX/CSIjI91e1+ffffedz47L36WkpJimjtrMukaNGuY1vRnOnz+/ueFNfS0Dse+fp+kfWu0yos2KUuPaZeyHH34wzdK1681LL71kruHf//53c83i4uKc1yit/8fBfv1efPFFSUpKMgVIefPmNX/vxowZY5qeK65d1mTlOulj6mbPoaGh5uaaaxkY9HNK6zPW/0N//vmnGbvB7jTJ124rWnCVnJws7733njRp0sQUWunYBna6b8nOdyDQ/w9n9fwrV64s77//vmn6rAUEb7zxhik41sRPm4gHmr1795oEV7te6fhIS5culWrVqgXNZ5+d87fbZ5/Zvbc/fgdI+OF3pWXan0VrCpG5Y8eOybPPPmv60NlpAJTcvFHRm89XX33VPNcafv3+6U2pJvxI36JFi2T+/PmyYMECqV69urNfsg7gxbUDkNZNvy4WveE/fPiwTJw4Uf75z39KoAr2+5asnr8mh64
1wPr5V61a1fR/1gFfA41+lzXuaQL70UcfmbinYxukl/TaTXbO326f/bEAvPemSb+X3HTTTabWK/Vo6Po8KirKZ8flz/r27WsGo9q4caNbiZ9eL+0icfbsWbftuZb/bbKvg0Bq7YjW+Omif3B1ADD9WUsPuXYZ1zilDk4ahI4ePWp+tq4R/4+vN2jQIFPL/+ijj5rRZrt06WIGiNTRmxXXLmuycp30MfVgr1evXjUj93MtA4N+Tml9xjqQVTDU7qfnrrvukkOHDond7luy8x0I5P/D2Tn/1HSwXC1kD9TPX1sC6mwb9erVM3FPB7CcPHly0Hz22Tl/u332OzO599YWj/72HSDh9+J/BP1PoH1cXWsT9XlGfVyCkY73o0FDmwNt2LDBTPPlSq+j/nFwvZY6yq8mZcF+LZs3b26aVWkpq7VojbU2q7Z+5tqlT5sgpp5KSPukaz8zpd9F/WPsev20Ca42QQ3263fx4kXTh9yVFnLq3znFtcuarFwnfdRCO73JsOjfSr3W2tcf/k8/Q9fPWGntULD/X9A4pQWvdrtvsft3ICfnn5omRXr/Eoiff1r077F2VbH7Z5+T87fbZ988k3tvvRfyu+9ArgwNGKQWLlxoRlqOj483oyz37NnTUaxYMcfJkyd9fWh+pXfv3mb0zk2bNjlOnDjhXC5evOjcplevXo5y5co5NmzY4NixY4cjJibGLLhe6pFCuXbp2759uyM0NNQxZswYx8GDBx3z5893FCpUyDFv3jznNuPGjTP/bz/++GPHnj17HO3atXNUqFDB8eeffzqCWVxcnOOWW25xrFixwnHkyBHHkiVLHDfddJMZjdzCtfvfSL5fffWVWTTsTpgwwfz8448/Zvk6Pfjgg466des6tm3b5vj888/NyMCPPfaYD88quGX2mb744ouOLl26OLf/4YcfzN+WQYMGmZlSpk2b5sibN69j9erVjmC5BhMnTnQsW7bM/K3du3eviVN58uRxfPbZZw473rfoues1sHzxxRcm3rzxxhvmOzBixAgzi45ei2A4/5EjRzrWrFnjOHz4sGPnzp2ORx991BEWFubYt2+fI9DoeemMBBr79G+2PtdZU9auXWv7zz4n52+nzz6r997+9h0g4feyqVOnmmQrf/78Zpq+rVu3+vqQ/I7eKKS16DQeFr3xfeaZZ8y0H3rT1KFDBxNckPkfHa5dxpYvX+6oUaOGKZyrUqWK45133nFbr9OmDRs2zBEZGWm2ad68uePAgQOOYJeUlGS+Z/r3TQP3bbfd5vjHP/7hSE5Odm7DtXOfjiz1ooUmWb1Ov/32m0nwixQp4ggPD3c88cQTJuGCf36m+qh/i1P/Tp06dcz9gP5/cY1xwXANXnvtNcftt99u/l6UKFHC0aRJE1MQbdf7Fj1361pYFi1a5LjjjjvMd0Cnafz0008dwXL+/fv3d94P69+61q1bO3bt2uUIRDolbfny5c253HzzzeZvtpXs2v2zz8n52+mzz+q9t799B0L0n9xpSwAAAAAAAHILffgBAAAAALAhEn4AAAAAAGyIhB8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4AZtq0qSJ9O/f39eHAQAAvIh4DyAjJPyATS1ZskRGjx6dpW3/85//SEhIiOzevVvs7LfffpMHH3xQypQpIwUKFJDo6Gjp27evJCUl+frQAADIEeJ95rG/bNmy5rzPnj3r68MBch0JP2BTJUqUkKJFi0qguXLlitf2nSdPHmnXrp188skn8v3330t8fLx89tln0qtXL6+9JwAA3kS8z1j37t2lVq1aufJegD8i4YfPvfzyy6bUNbeaveli2bRpk3nvjz76KFfev1u3bnLrrbfmehM/fc9XX31VnnzySXNTUK5cOXnnnXec21aoUME81q1b11yP0qVLS1RUlPk5vWaCei7t27eXkSNHys033yzh4eEmcb58+bJzm9WrV0v
jxo2lWLFiUrJkSXnooYfk8OHD19U0fPjhh3L//fdLWFiYzJ8/35TGP/bYY3LLLbdIoUKFpGbNmvLBBx9cd379+vUzx1e8eHGJjIyUd999Vy5cuCBPPPGEOc+KFSvKqlWrnL+j2/Xu3Vvq168v5cuXl+bNm8szzzwj//73v7N9fbWwQI9dzyHQ/18AgN0E072FxmuNwTmJ967HnR5PxPt9+/aZ99P1+hgaGppuvJ83b54MHjzYtMLTgvqbbrrJxHv9Pb1PyEq8t8yYMcPU6j///PM3fJ2BQEXCD4+ykiBr0T/M2nw6NjZWpkyZIn/88YdH3uf48eMmmPtjkzR/PbY333zTJLpfffWVSXI18T1w4IBZt337dvOotd0aVH/55Rez/p///Kd06dIl3X2uX79evv32W3Nzowm5NivUGwKLBuOBAwfKjh07zLYauDt06CApKSlu+3nxxRflzjvvlEGDBpnvyqVLl6RevXry6aefyjfffCM9e/Y0x2Edp2XOnDnmRkBf1+PWY/7LX/4id999t+zatUtatmxpfu/ixYvpflZ6zFrYAADwT9xbZO/YshrvT5w4YWJgVtxovJ8+fbp51M/v73//u/n99OJ9165d5fXXX5dHHnnExHltjq+PqkePHlmO9/v375dRo0bJ3LlzzfF48hoDAcUBeNDs2bMd+rUaNWqU45///Kfj/fffd7z66quOli1bOkJCQhzly5d3fP31126/c+XKFceff/6ZrfdJTEw076Pvlx3JyclmsWzcuNHsZ/HixdnaT06P7fLly45Lly45csP999/vePbZZ83Pet0ff/xx57qUlBRHqVKlHDNmzDDPjxw5Yo75q6++cjRs2NBxzz33ZLr/uLg4R4kSJRwXLlxwvqb7K1KkiOPatWtp/s4vv/xi3mfv3r1u7ztp0iRHmzZtzHGmR9c/99xzbufXuHFj5/OrV686Chcu7OjSpYvztRMnTpj9JyQkuO3r0UcfdRQsWNCsa9u2bba/f9b76e/ptfSGESNGmOMDgGDHvUXGx3bvvfc6+vTpk+14n1WeiPd16tRxxvvMlC5d2uw7dbzX39fYmJV4r/datWrVMt8X18/k999/9/jnD/g7avjhFa1atZLHH3/cNLUaMmSIrFmzxpQmnz59Wh5++GH5888/ndtqsy4trfcmq8Q3f/78ZvGVfPnymcHifMG1/5qWsGsTQP08UtPXtMldVtSuXds0wbPExMTI+fPn5dixY+b5wYMHTVO92267zTQBtLozHD161G0/WhPh6tq1a2YAIm3ap30TixQpYr5DqX/P9Zzy5s1rmhHq71i02Z91Tq4mTpxoagQ+/vhj0+RQayWySmsxrPfT722wNbvX2hqtkQGA3Ma9Rdq09lrPN7vxPjtuNN5r0/2sxvuTJ09eVyOf3Xiv34+qVaua74udpNdiEcgICT9yTbNmzWTYsGHy448/mv5ZGfWzW7dunbMvmP7xr1y5srz00ktmnTYna9CggflZg77VxE+b/Cntj1ajRg3ZuXOn3HfffSZAWb+bup+da8DRbTQoFi5c2Nw4WEHMosFL+7Gl5rrPzI4trT78mkA+99xzpq+aFgboub7xxhtateu2ne5HR5RftmyZOT/dtnr16qbfXFZokqYD12hQ1JsgHbROk15X2qfvyJEjpmmddeyZ9VHXz/Kuu+4y1/nee+81r23ZssU8tm3bVr777jvT50/f0+rvp7+j19zy7LPPmvfU74bVt0+/Fy+88IJs3LhRtm3bZvodrly50jm6vibqqb83es2WL19umvlrnz4doE/po+7Pop+z3hhq3z49P33UY9+6dWuazUg3b95smkWWKlXKNC10XZf6+mgfQu0ioO+vNz36fViwYIFzvY4XoM0QtV+ldS4DBgxwu1HNKf1+6f+XH374wTSV1O+yNnvVJo2pv0/6HdOmkHrTVLBgQdOkMq3+ptb3Tvta6vdNj9n6zmV3H4sXL5Zq1aqZbfVmce/evWb922+
/bfpf6ndE/y95a1wEAPbDvYWYOPv++++7VS643lvo31rt+uCNe4tff/3V3Ftocr1o0SKzP022NW4rvc/Q16zz1uvvGju16f7kyZNNvNfPT49bj1Fnz7HOU/vg6zmlPm59TbstaEFQRESEeX3o0KEmlm/YsMHEHL2f0AKCpk2bOsfy0XPU3/n666+d+8vsGmfGuifQ+5+nn37axEW9B9DuCb///rvbtlrZ0KZNG+eMQbfffrsp9HC9L8rsO5fdfezZs8fcm+g+NN5asVrvbxo2bGjisv5/0AI02A8JP3KV1R987dq16W6jA7voYC/JyckmUdG+aBokv/jiC7Neg4q+rrSvl/Yz10X/GFq0JFn/mNepU0cmTZrk/EOfnjFjxpiEUwOO9i3Tm4IWLVpkOwnLyrG50qCm56Y1zjpd3IQJE8wfXO3Lnlat8+eff24Sz0cffVTGjx9vkvhOnTo5S84zqpWdNm2aOZbOnTubAKsBUAOGBlqrZkKPXZNlvW7WsWuynh4NqvqZatDV39VrrsFV++HpMWmfQU189YZGB+DRa6u0/5/227c89dRT5j31vfU99Wf9zLRkXkvwNSE+dOiQKayYOnWqGTzo559/vm6AHn1PTahbt24tr732mglg6X3HNMHXYK81EkpvFjUwWjcprvSaa1/A4cOHux13WgFfA/CZM2fMDc+4cePMubjeOOkNiJbQa/9DPRdNzPVRbwo8QYO9fpf0Wul3RJPwESNGmMWVfu5awKOfmw7wpJ+bFkTo/4PU9MZJP4O//vWv5vesQqvs7EM/F735jIuLMzfi2hdU/5/r91JvRPUa6/c+ISHBDDYFAFnFvUXG9xaaFGoCqn9j9W+1ck0Ms3JvofHS9bg1/mshhl57PRbdn/at18H3NCm1BgrUQnJdr7X36pVXXnG7t9Drr4XyGu81IdW++npPoQXA1nm6tixwpWMQWLFc7zGsGn6N5VoIpK9rf3wtdNcCe6XbaTzTQhB9P+23n5NrnB4tPNH4pnFO47oWlus9i2tBi94raIGT3udpHNU4nd79RXrfuezsQwsc9Luvib1+vlpAoJ+1Dpisj3rPpPcrWtii4yZ4akwM+BFf9ymAPfvZaT+o9ERERDjq1q2bbl/liRMnmufa/ysn/ay0r5eumzlzZprrdLFYfbpuueUWR1JSkvP1RYsWmdcnT57sfE37xWk/tsz2mdGx6e+79lNftmyZ2faVV15x2+6RRx4x/RIPHTrkfE23y58/v9tr2mdRX586dWqGffiLFy9utps3b55zvfZtK1u2rOknd+bMGdOnXY9DX4uNjXVkpkOHDmafur320fv0008dkZGRjhdffNH0GdR+fSVLljT95Q8ePOhYv369o0GDBuZ3tN9loUKFHN99952zL6FrH/4BAwY4oqOjHV988YVj3Lhx5lro9u3atXO+f6VKlczv6jZq586d5vl9993ndpz6mtXvT49R+342a9bMXMv33nvPUbVqVTNmwfHjxx1FixZ1+33r+6x9B7XPoCtrnfaHVGfPnjW/r2MgpO436trP/+LFi9ddy7Fjx5pz/PHHH2+oD79+v/R3+vXr5/beem31fF3/T6U+Dh1fokaNGubapL5+efLkcezbt++698vOPgoUKOC8Vurtt982r0dFRbn93xsyZIjbdQUA7i0yPjaNvRp/rP09+eSTbvcWtWvXNtfDurfQv8e67uTJk1m6t9Dj03uFxx57zMQCK943b97cbDd37lwT73XsgP379zuqVatm4oauW7p0qdmHXou0xg5wjff6u0899ZQjb968zvNxvZ+xYrl1njVr1jTHfvjwYfOarp81a9Z1sVz78+s9iGsffo0xeh10XIisXOOsfkfr1atnYqFl/Pjx5vWPP/44w/uAp59+2tznuI7zlNF3Lrv7WLBggfM1695LP6OtW7c6X1+zZg1jGNgUNfzIdVoimVHpodV/XGufU4/mnlVaeqlNsrJKS2Fd57DVEk6dmk6bkHu
T7l9r2q2ab4vWhGrsSl2DrTUD2mzLtU+bNhnTJtwZ0VJ5PT+rNltp0zMt7dU+eFrCrrWs2rT6p59+ksTExEyP3eqXpyXDWpqutb9ao2A1o9T+dwsXLjQl7NqcTEvUrRF9tVm31nK7TtnjSpvk6aj9WvutJdZa82/VzGjzQV2s74k2+VdWLbo2F0yP1vhrrYPWWF+9etW0AtBjXrFihfm8//a3v5maDm1K6EpHBdbPKSNac6Pfay1dT91v1LVZqWurAy1N13PRZvH6eWvTRE/QGgbX99bn2p3Ctame63Fo6f+5c+dMTUnqbh5Ka0H0M0stO/vQKRBdu7Pod09pLZLr/z3r9cy+0wDginuL/9Ea5ozuLTp27Gjivdb8Z/XeQv+GV6pUydR4W/HeGh9AWw5qvNem59rqS+8rsnqNXeO93kvo/jTmZ0bPQ1sRau25jhtg0ZYEqWO5fm7WmADaEkFrza0uHWnFqxuhrQNcux9oaz5t/eb6mbvGTv3O6n2Axk69L9LuGVn5zmVnH3quWpNv0fPW/w/aqsGKuYr4a1//G+EDyCUaCKymVWnRQPLee++ZZt6aPGmQ0eCkgTIr06oobVKWnQF0NIi50iCmfZy83ZdYm5FrwHW9IVD6R9ha70r7faem/dFS9w+z+qNZdM55veau10+buWky/q9//cu8T58+fcw116RME/TMWDdW2hxdbxzSojcR2oRdA7om2Vo4oLRZmhWEUvcntAK29idUmmjqzYv2DVSpuxhYA/ToOej5uU4TpDQJtfr2aVO4pUuXmhuuf/zjH87me67XXW9StK+h9mFMPW9xRqzCi8yunRaUaCHGJ598ct3npsd6o/QauN78qDvuuMM8un6ftZBDm1bq90ALUixpDUKY3vlnZx+pv7vWZ6L9S9N6Pa3vNACkJ5jvLbTZvhXzdd9WM37r3sKaZs6KMXoeVqG9HlNW7y00vrrG2CpVqphz1Oun8V67vim9t9Am6G+99ZZJyJUmvdrlTV9PL967dgHTBNZinZs2Xbdo9zuN5Zq8Wqz7Cb0fcI3l+rMWqOu11+5urt0ZtKuDJ6X+zPU+R4/T9TPX7iXWfVHqCobU9wHpfeeysw/tVpE6LmusJf4GDxJ+5CqtPdY/RPpHNz1aaqmDnmjNrfZ905pb7WekA/No/7zMalqtfXhaeqOxa+DIyjF5Qnrvk1bS7A90oB2tHdaaAk2utQZBa7+1RF37NGalBkC30X78Or5BWlIHLG/w1PdJvysPPPCA6eOv5683S9oHUscj0EGbclrrlF16M6W1M1pTo3Mj682I1kjMnj3bbYDBjM4/u/tI77sbaN9pAP6He4sbY/e/wzpugfbp1/FhdGA7LWTQQor+/fvnWtzN6X1RWt+57O6D+AsSfuQqHQBFadOtjOgfYi1910UTPf1jrTWyGqi1FNnTU6HpdDKp/9jpIHGu08Boabf+kU1NS5Jda1Szc2xa867NrLW23LWW32qOpes9QfejI7RqEHCtycjofbRUOj3a1cA6Xi3RT11i71oqr03nlixZ4jbwjc4EkFp6100DmdYW6Hcho2ur56Dnp/t2LWHXz9GVthDQAYC0KWBqej30+uSkEMFqDqkDFqZ306mDBOnsCDookesgfdodwFP0GmhzPKtWX+l7KqtJvbbq0JsDndLKdZpITdazyhP7AABP4N7ixu4ttJVe6tHotWm4DjinI/B78t7CU7ITy3VE+v/X3n2AR1VtDR9fgUCoAUFDuRSx0YsUAcFCkQjoBY1eC1I0gCCogALixUjTKEgXQREJvFdE8BWuAlIEgSuEKihNiqKgGPAqXTrzPWu9z5lvJiSQhEkmOfP/Pc9xMnP2nDlzEtl7nb332jrCb8qUKX7l9Lr7Th8IxO9ff+e+yRx15IkmF3RGOKanXZSaQBwDoYU5/MgyOuxI76zq8GCd75Ua7f1MzgkonWHD2iuqUqokM2L69Ol+c/+0ctB/oDUzqm9Ap1lpnaXlnCHNyZf
YSc+5aQWgd/F12JsvzayrFY/v518N/Rxd11Z7Mxw6h12H42tgr3eKk9MhgKltuo6uztfTClXvLie/m+zcHXbuHvveLdbrpz3Cyel1S2lI+z/+8Q/rAZ88eXKKuQl0HrxvQy/5sfU7+tJzatGihc3j9B1id/DgQeuZ1hwAetc8vfSY2rCKj4+/ZJ36y10P/dl3mGIg+P496fH1ufa+ayPXOQ/9+/Id1qjXIvmwyssJxDEA4GrRtrj6toXO705ez+uUAF1aV+t7vRmQ0r/tGWlbBEp66nItm7zXWqcbatvCVyB+/5on6Ny5c97nuuyvXhPnmqenXZSaQBwDoYUefmQK7QHWO6z6j5z+46sVsvZi6t1enbucPKmZLw0gddidzvXS8jpHW/8R0zlITkI2rSA14cikSZMsyNJ/pDXZSFrmWqdEh3fpsTUxip6vzjHXXlpN1ubQeX9aWevcOA1Cdc62rhnrm+gmveemlaneCdYeBq2watasaUMLtQLToWbJj301SWT0Dr4OG9ekOtrTq99Fk/Xpd02eQ0Bdbmiks1/PWxtamihG50JqT68m/NOGgga+moxOey90KTZNHqQNDe2JSWm4mC4po40GXWJG18LVxoJeH13yR+fvd+vWzXphGjVqZA0Z/fvS17WHWRsk+n5NAKffR+98N2jQwNaXdXq3fe/c67xzZz1mXYpI5xbq9dFGny5ZkxHasNDGlP6d6Plr0iD97jo6QXtKtFdfh/Dr7/TFF1+0hoa+R3vKAzlfTv/f0qGqes31707/X9Thq7pMkpP/QP/f0t4t/VvW89T/x3R5PP2dam9NWgTiGACQHrQtMqdtoXO3k9f5Wi9qHXW5aQwZaVsEUlrrck36q79//T1ou0RH2+noheT5bgLx+9fAW2+u6+9SRx/o35ien06BU+lpF6UmEMdAiAn2MgFwF2dZEmfT5VJ02a177rnHlqHxXZ4mtaVzdOkUXX6tdOnS9n591KVgdu3a5fc+XeJEl34JDw/3W0ZElyCpWrVqiueX2tI5H330kS0HFhUVZcvT6TJmvkukOUaOHGlLy+hSLrqU24YNGy455uXOLfmyfOr48eO2LI1+zzx58thycyNGjPBbyk3pcXr06HHJOaW2pE9yBw8e9Dz55JOea6+91q6rLmeT0tIrejz9/mmly9zpUkh6TXT5P70WS5Ys8e7XpXYaNGhg11W/Y79+/bxLv+j1d5w4ccLz+OOPe4oWLWr7fK+TLnHz5ptv2u/V+Rxd+mbw4MGeo0ePesudPHnSrlGxYsVsCaG2bdt6du7cacfT5f18ffPNN7b8oJbTZWyaNGniWb16dZqXgkq+LJ/js88+89x+++32fSMjIz233Xab/X05dNmh5s2b2+fq76JLly7eJZB8fx8ZXZavYMGCtkSRs/ShLp2kx9JlEn3p0kX6t6bXs1KlSvbZKX1man93V3sMvW76uv6t+3L+n5w9e3a6vjsA96JtEVpti9Sute+yfOmpy3WZuhdeeMFTqlQp+z3oNU5MTEzXNU7r3+iKFSs8Xbt2tXaKnlO7du08f/zxh1/ZtLaLLvc3d7XHSO33cbk6HzlXmP4n2DcdACCz6NBEXSZIe0wuN9zTDbSXRXtXdM4gAADIGjrtQUcQ6ChHHXUIZCfM4QfgGjqnPzkdVqi5BnwT2wAAAAChgDn8AFxD5+zpPEKdu6hz+XS+p246zzArlu/LLJrMMKWbGb5KliyZZecDAEAo0Lo3pYTCyXM1ANkZAT8A19BENprARxMJ6rD2cuXKyaBBgyxxUU72/PPPW9K/y2F2FgAAgaXJhHWo/uVoQmEgO2MOPwBkc9u3b5cDBw5ctoyuIQ0AAAJHl1Hctm3bZcvoKkGaNR/Irgj4AQAAAABwIYb0p8HFixetd03X5PRdyxsAgGDR+/XHjx+X0qVLW2JKXD3qewCA2+p6Av400Mo/Jyf8AgC41/79+6VMmTLBPg1XoL4HALitrifgTwO90+9c6MjIyGC
fDgAAcuzYMQtOnToKV4/6HgDgtrqegD8NnGF9WvnTAAAAZCcMPQ8c6nsAgNvqeib9AQAAAADgQgT8AAAg6H799Vd54oknpHjx4pI/f36pXr26bNiwwS9xUVxcnJQqVcr261KUu3fv9jvGn3/+Ke3atbPe+aJFi0psbKycOHEiCN8GAIDsgYAfAAAE1eHDh6VRo0aSJ08e+eKLL2T79u0ycuRIv7Wthw8fLuPGjZNJkybJ2rVrpWDBghIdHS2nT5/2ltFgX9fMXrJkicybN09WrlwpXbt2DdK3AgAg+MI8esscV0yWUKRIETl69Chz+gAA2YKb6qaXXnpJVq1aJf/5z39S3K9NFV2S6IUXXpAXX3zRXtPvXaJECUlISJBHH31UduzYIVWqVJH169dL3bp1rczChQulVatW8ssvv9j7kztz5oxtyZMjueGaAgByvkDU9fTwAwCAoPrss88sSH/44YclKipKbr31Vpk8ebJ3/969eyUpKcmG8Tu0AVS/fn1JTEy05/qow/idYF9peV23WEcEpCQ+Pt6O42wsyQcAcBsCfgAAEFQ//vijTJw4UW6++WZZtGiRdO/eXZ577jmZNm2a7ddgX2mPvi997uzTR71Z4Cs8PFyKFSvmLZPcgAEDrNfE2XQ5PgAA3IRl+QAAQFBdvHjReuZff/11e649/Fu3brX5+h07dsy0z42IiLANAAC3oocfAAAElWbe1/n3vipXriz79u2zn0uWLGmPBw8e9Cujz519+njo0CG//efPn7fM/U4ZAABCDQE/AAAIKs3Qv3PnTr/Xdu3aJeXLl7efK1SoYEH70qVL/RIZ6dz8hg0b2nN9PHLkiGzcuNFbZtmyZTZ6QOf6AwAQihjSDwAAgqp3795y++2325D+f/zjH7Ju3Tp57733bFNhYWHSq1cvGTZsmM3z1xsAr7zyimXeb9u2rXdEwL333itdunSxqQDnzp2Tnj17Wgb/lDL0AwAQCgj4AWS52IT1qe6b0qlelp4LgOCrV6+ezJkzx5LoDRkyxAL6MWPGSLt27bxl+vXrJydPnpSuXbtaT37jxo1t2b18+fJ5y3z44YcW5Ddr1syy88fExMi4ceOC9K2A0Ha5ul5R3wNZg4AfAAAE3X333WdbarSXX28G6JYazcg/Y8aMTDpDAAByHubwAwAAAADgQgT8AAAAAAC4EAE/AAAAAAAuFNSA/8KFC5ZlV5Pz5M+fX2688UYZOnSoeDwebxn9OS4uztbo1TLNmzeX3bt3+x1H19jVxD6RkZFStGhRiY2NlRMnTviV+e677+SOO+6w5D5ly5aV4cOHZ9n3BAAAAAAgpJL2vfnmmzJx4kSZNm2aVK1aVTZs2CBPPvmkFClSRJ577jkro4G5ZtjVMs4yPNHR0bJ9+3ZvZl4N9n/77TdZsmSJLcOjx9Asvk7iHl2rt0WLFnazQJfq2bJlizz11FN2c0DLAQAAAAhcFn4A2UNQA/7Vq1dLmzZtpHXr1vb8+uuvl48++sjW33V693VZnoEDB1o5NX36dClRooTMnTvX1tbdsWOHLcuzfv16qVu3rpUZP368tGrVSt566y1be1eX6Tl79qx88MEHkjdvXru5sHnzZhk1ahQBPwAAAJDFWKIXCIEh/bfffrssXbpUdu3aZc+//fZb+frrr6Vly5b2fO/evZKUlGQ98w7t/a9fv74kJibac33Unnon2FdaXtffXbt2rbfMnXfeacG+Q0cJ7Ny5Uw4fPnzJeZ05c8ZGBfhuAAAAAADkJEHt4X/ppZcsmK5UqZLkzp3b5vS/9tprNkRfabCvtEfflz539uljVFSU3/7w8HBbi9e3jE4HSH4MZ98111zjty8+Pl4GDx4c8O8LAAAAAEBI9PDPmjXLhtvrXPtvvvnG5unrMHx9DKYBAwbI0aNHvdv+/fuDej4AAAAAAOSoHv6+fftaL7/OxVfVq1eXn3/+2XrYO3bsKCVLlrTXDx48aFn6Hfq8Vq1a9rOWOXTokN9
xz58/b5n7nffro77Hl/PcKeMrIiLCNgAAAAAAcqqg9vD/9ddfNtfelw7tv3jxov2sw/A1INd5/g6dAqBz8xs2bGjP9fHIkSOyceNGb5lly5bZMXSuv1Nm5cqVlsHfoRn9K1aseMlwfgAAAAAA3CCoAf/9999vc/bnz58vP/30k8yZM8cy5z/wwAO2PywsTHr16iXDhg2Tzz77zJbT69Chg2Xeb9u2rZWpXLmy3HvvvdKlSxfL7r9q1Srp2bOnjRrQcurxxx+3hH2xsbGybds2+fjjj2Xs2LHSp0+fYH59AAAAAADcOaRfl8975ZVX5JlnnrFh+RqgP/300xIXF+ct069fPzl58qQtn6c9+Y0bN7Zl+PLly+cto3kANMhv1qyZjRiIiYmRcePG+WX2X7x4sfTo0UPq1Kkj1157rX0GS/IBAAAAANwqzKOL3eOydBqB3jTQBH6RkZHBPh0gx2PtXeDqUTcFHtcUCExdfrVoCwCBq5eCOqQfAAAAAABkDgJ+AAAAAABciIAfAAAAAAAXIuAHAAAAAMCFgpqlHwAAAEDoJebLzPMi6R/w/9HDDwAAAACACxHwAwAAAADgQgT8AAAAAAC4EAE/AAAAAAAuRMAPAAAAAIALkaUfAAAAQLaRXVcHAHIievgBAAAAAHAhevgBAAAAhMQIgSmd6mXpuQDBRg8/AAAAAAAuRA8/AAAAEIJz4entBtyPHn4AAAAAAFyIgB8AAAAAABci4AcAAAAAwIUI+AEAAAAAcCECfgAAEFSDBg2SsLAwv61SpUre/adPn5YePXpI8eLFpVChQhITEyMHDx70O8a+ffukdevWUqBAAYmKipK+ffvK+fPng/BtAADIPoIa8F9//fWXVPC6aaUeyAp++fLlUrt2bYmIiJCbbrpJEhISsvR7AgCAy6tatar89ttv3u3rr7/27uvdu7d8/vnnMnv2bFmxYoUcOHBAHnzwQe/+CxcuWFvg7Nmzsnr1apk2bZrV9XFxcUH6NgAAZA9BXZZv/fr1Vkk7tm7dKvfcc488/PDD3gp+/vz5VsEXKVJEevbsaRX8qlWr/Cr4kiVLWgWvDYQOHTpInjx55PXXX7cye/futTLdunWTDz/8UJYuXSqdO3eWUqVKSXR0dJC+OQAA8BUeHm71eXJHjx6VKVOmyIwZM6Rp06b22tSpU6Vy5cqyZs0aadCggSxevFi2b98uX375pZQoUUJq1aolQ4cOlf79+9vogbx586b4mWfOnLHNcezYsUz8hgAAhFgP/3XXXWeVu7PNmzdPbrzxRrnrrru8FfyoUaOsgq9Tp45V8BrYawWvnAr+X//6l1XuLVu2tAp+woQJdpdfTZo0SSpUqCAjR460xoHeNHjooYdk9OjRwfzqAADAx+7du6V06dJyww03SLt27WwEn9q4caOcO3dOmjdv7i2rw/3LlSsniYmJ9lwfq1evbsG+Q2/qawC/bdu2VD8zPj7eOhScrWzZspn6HQEACNk5/Bqga+D+1FNP2bD+QFXwWsb3GE4Z5xgp0bv9egzfDQAAZI769evbEPyFCxfKxIkTbXTeHXfcIcePH5ekpCTroS9atKjfe7Tu131KH33bAs5+Z19qBgwYYB0MzrZ///5M+X4AAITkkH5fc+fOlSNHjkinTp3seaAq+NTKaBB/6tQpyZ8/f4p3/AcPHhzgbwgAAFKiI/QcNWrUsBsA5cuXl1mzZqVYTweK5vbRDQAAt8o2Pfw6fF8rfB3OF2zc8QcAIHj0Zv8tt9wie/bssSl/OgpQOwV8aRJfZ86/PiZP6us8TykvAAAAoSJbBPw///yzJdrRZHqOQFXwqZWJjIxMtddA7/brft8NAABkjRMnTsgPP/xgCXY1h48m49Wku46dO3faHP+GDRvac33csmWLHDp0yFtmyZIlVn9XqVIlKN8BAIDsIFsE/JqMT5fU02z6jkBV8FrG9xhOGecYAAAguF588UVbbu+nn36y5LwPPPCA5M6dWx577DF
LphcbGyt9+vSRr776ynL8PPnkk1aPa4Z+1aJFC6v327dvL99++60sWrRIBg4caEv7MmQfABDKgj6H/+LFixbwd+zY0ZbkcfhW8MWKFbMg/tlnn021gh8+fLjN109ewetyfG+//bb069fPEgIuW7bM5gTqcn8AACD4fvnlFwvu//jjD1vBp3HjxrYij/6sdGWdXLlySUxMjCXW1eS777zzjvf9enNAV/rp3r27tRMKFixo7YohQ4YE8VsBWSM2YX2wTwFANhb0gF+H8muvvQbjyQWigtcl+TS47927t4wdO1bKlCkj77//vh0LAAAE38yZMy+7P1++fLbkrm6p0SR/CxYsyISzAwAg5wp6wK+99B6PJ1Mr+Lvvvls2bdp01ecKAAAAAEBOkS3m8AMAAAAAgMAi4AcAAAAAwIUI+AEAAAAAcCECfgAAAAAAXIiAHwAAAAAAFyLgBwAAAADAhQj4AQAAAABwIQJ+AAAAAABciIAfAAAAAAAXIuAHAAAAAMCFCPgBAAAAAHAhAn4AAAAAAFwoPNgnAAAAACDrxSasD/YpAMhk9PADAAAAAOBCBPwAAAAAALgQAT8AAAAAAC5EwA8AAAAAgAuRtA8AAABASLhSosIpnepl2bkAWYEefgAAAAAAXIiAHwAAAAAAFyLgBwAAAADAhYIe8P/666/yxBNPSPHixSV//vxSvXp12bBhg3e/x+ORuLg4KVWqlO1v3ry57N692+8Yf/75p7Rr104iIyOlaNGiEhsbKydOnPAr891338kdd9wh+fLlk7Jly8rw4cOz7DsCAAAAABBSAf/hw4elUaNGkidPHvniiy9k+/btMnLkSLnmmmu8ZTQwHzdunEyaNEnWrl0rBQsWlOjoaDl9+rS3jAb727ZtkyVLlsi8efNk5cqV0rVrV+/+Y8eOSYsWLaR8+fKyceNGGTFihAwaNEjee++9LP/OAAAAAAC4Pkv/m2++ab3tU6dO9b5WoUIFv979MWPGyMCBA6VNmzb22vTp06VEiRIyd+5cefTRR2XHjh2ycOFCWb9+vdStW9fKjB8/Xlq1aiVvvfWWlC5dWj788EM5e/asfPDBB5I3b16pWrWqbN68WUaNGuV3Y8Bx5swZ23xvGAAAAAAAkJMEtYf/s88+syD94YcflqioKLn11ltl8uTJ3v179+6VpKQkG8bvKFKkiNSvX18SExPtuT7qMH4n2FdaPleuXDYiwClz5513WrDv0FECO3futFEGycXHx9vnOJvelAAAAAAAICcJag//jz/+KBMnTpQ+ffrIyy+/bL30zz33nAXmHTt2tGBfaY++L33u7NNHvVngKzw8XIoVK+ZXxnfkgO8xdZ/vFAI1YMAAOyffHn6CfgAAAMDdYhPWX3b/lE71suxcgBwf8F+8eNF65l9//XV7rj38W7dutfn6GvAHS0REhG0AAAAAAORUQR3Sr5n3q1Sp4vda5cqVZd++ffZzyZIl7fHgwYN+ZfS5s08fDx065Lf//Pnzlrnft0xKx/D9DAAAAAAA3CSoAb9m6Nd59L527dpl2fSVDsPXgHzp0qV+w+t1bn7Dhg3tuT4eOXLEsu87li1bZqMHdK6/U0Yz9587d85bRjP6V6xY8ZLh/AAAAAAAuEFQA/7evXvLmjVrbEj/nj17ZMaMGbZUXo8ePWx/WFiY9OrVS4YNG2YJ/rZs2SIdOnSwzPtt27b1jgi49957pUuXLrJu3TpZtWqV9OzZ0zL4azn1+OOPW16A2NhYW77v448/lrFjx/rN0wcAAAAAwE2COoe/Xr16MmfOHEuSN2TIEOvR12X42rVr5y3Tr18/OXnypC2fpz35jRs3tmX48uXL5y2jy+5pkN+sWTPLzh8TEyPjxo3z7tdM+4sXL7YbCXXq1JFrr71W4uLiUlySDwAAAAAANwjz6GL3uCydRqA3DY4ePSqRkZHBPh3A1RlwyX4LpA11U+BxTeHGrPIILNopyGn1UlCH9AMAAPh64403vFP6HKd
Pn7ZResWLF5dChQrZSL7kyXg14W/r1q2lQIECtlxv3759LYkvAAChjIAfAABkC+vXr5d3331XatSocUnOn88//1xmz54tK1askAMHDsiDDz7o3X/hwgUL9s+ePSurV6+WadOmSUJCgk3fAwAglBHwAwCAoDtx4oTl8Jk8ebLfCjo6jHHKlCkyatQoadq0qeXimTp1qgX2mvhXaZ6e7du3y7/+9S+pVauWtGzZUoYOHSoTJkywmwAAAIQqAn4AABB0OmRfe+mbN2/u97ouu6vL6vq+XqlSJSlXrpwkJibac32sXr26lChRwlsmOjra5j7q6jypOXPmjJXx3QAAcJOgZukHAACYOXOmfPPNNzakP7mkpCRbWrdo0aJ+r2twr/ucMr7BvrPf2Zea+Ph4GTx4cIC+BQAA2Q89/AAAIGj2798vzz//vC2x67vkblbQZYF1yoCz6bkAAOAmBPwAACBodMj+oUOHpHbt2hIeHm6bJuYbN26c/aw99ToP/8iRI37v0yz9JUuWtJ/1MXnWfue5UyYlERERtsyR7wYAgJsQ8AMAgKBp1qyZbNmyRTZv3uzd6tatawn8nJ/z5MkjS5cu9b5n586dtgxfw4YN7bk+6jH0xoFjyZIlFsBXqVIlKN8LAIDsgDn8AAAgaAoXLizVqlXze61gwYJSvHhx7+uxsbHSp08fKVasmAXxzz77rAX5DRo0sP0tWrSwwL59+/YyfPhwm7c/cOBASwSovfgAAIQqAn4AAJCtjR49WnLlyiUxMTGWWV8z8L/zzjve/blz55Z58+ZJ9+7d7UaA3jDo2LGjDBkyJKjnDQBAsBHwAwCAbGX58uV+zzWZ34QJE2xLTfny5WXBggVZcHYAAOQczOEHAAAAAMCFCPgBAAAAAHAhAn4AAAAAAFyIgB8AAAAAABci4AcAAAAAwIUI+AEAAAAAcCECfgAAAAAAXIiAHwAAAAAAFyLgBwAAAADAhYIa8A8aNEjCwsL8tkqVKnn3nz59Wnr06CHFixeXQoUKSUxMjBw8eNDvGPv27ZPWrVtLgQIFJCoqSvr27Svnz5/3K7N8+XKpXbu2REREyE033SQJCQlZ9h0BAAAAAMgxAf+PP/4YsBOoWrWq/Pbbb97t66+/9u7r3bu3fP755zJ79mxZsWKFHDhwQB588EHv/gsXLliwf/bsWVm9erVMmzbNgvm4uDhvmb1791qZJk2ayObNm6VXr17SuXNnWbRoUcC+AwAAoSiQ7QEAABB44Rl5k/aS33XXXRIbGysPPfSQ5MuXL+MnEB4uJUuWvOT1o0ePypQpU2TGjBnStGlTe23q1KlSuXJlWbNmjTRo0EAWL14s27dvly+//FJKlCghtWrVkqFDh0r//v1t9EDevHll0qRJUqFCBRk5cqQdQ9+vNxVGjx4t0dHRGT5vAABCXSDbAwBSFpuwPtinACDUevi/+eYbqVGjhvTp08eC9aefflrWrVuXoRPYvXu3lC5dWm644QZp166dDdFXGzdulHPnzknz5s29ZXW4f7ly5SQxMdGe62P16tUt2HdoEH/s2DHZtm2bt4zvMZwyzjFScubMGTuG7wYAADKvPQAAALJJwK896WPHjrUh9h988IENxW/cuLFUq1ZNRo0aJb///nuajlO/fn0bgr9w4UKZOHGiDb+/44475Pjx45KUlGQ99EWLFvV7jwb3uk/po2+w7+x39l2ujAbxp06dSvG84uPjpUiRIt6tbNmy6bg6AACEhkC1BwAAQDZM2qfD8XVOvc6xf/PNN2XPnj3y4osvWoDcoUMHq/gvp2XLlvLwww9b74D2ui9YsECOHDkis2bNkmAaMGCATSlwtv379wf1fAAAyM6utj0AAACyYcC/YcMGeeaZZ6RUqVJ2J18r9x9++EGWLFlid/vbtGmTruNpb/4tt9xiDQUdGqjJ+PQGgC/N0u/M+dfH5Fn7nedXKhMZGSn58+dP8Tw0m7/u990AAEDWtAcAAEAQA36tzHXu/O2
3324V+fTp0+Xnn3+WYcOGWYI8HZavQ/V1bl96nDhxwhoI2mCoU6eO5MmTR5YuXerdv3PnTpvj37BhQ3uuj1u2bJFDhw55y2jjQgP0KlWqeMv4HsMp4xwDAABkTGa1BwAAQBCz9Ot8+6eeeko6depkwXlKoqKiLMv+5WgPwP333y/ly5e3hsKrr74quXPnlscee8zmzmvWX00EVKxYMQvin332WQvUNUO/atGihQX27du3l+HDh9t8/YEDB0qPHj2sl15169ZN3n77benXr5+d87Jly2zKwPz58zPy1QEAQIDbAwAAIBsF/NpDrtnyc+XyHyDg8Xhsvrvu04R7HTt2vOxxfvnlFwvu//jjD7nuuuss0Y8uuac/K106Tz8jJibGMufrPP933nnH+369OTBv3jzp3r273QgoWLCgfeaQIUO8ZbSHQYP73r17W2KhMmXKyPvvv8+SfAAAXKVAtQcAwC3LJE7pVC/LzgVIizCP1srppIG2JuDRu/a+NHDX1y5cuCBuohn9dcSBJvBjPj+QuZUlFSWQc+omt7UHssM1BdIbYCJ7oR2D7FYvZWgOf2r3CHQOfr58+TJ0IgAAIGehPQAAgIuG9Ot8ehUWFiZxcXFSoEAB7z69i7927VpbkxcAALgX7QEAAFwY8G/atMl7R1+z4+u8PIf+XLNmTUvEBwAA3Iv2AAAALgz4v/rqK3t88sknLQEe89sAAAg9tAcAAHBxlv6pU6cG/kwAAECOQnsAAACXBPwPPvigJCQk2F18/flyPv3000CcGwAAyGZoDwBA6liJCDk24NflADQ5j/MzAAAIPbQHAABwYcDvO2yPIXwAAIQm2gMAAOQcuTLyplOnTslff/3lff7zzz/LmDFjZPHixYE8NwAAkI3RHgAAwIUBf5s2bWT69On285EjR+S2226TkSNH2usTJ04M9DkCAIBsiPYAAAAuDPi/+eYbueOOO+znTz75REqWLGl39bXSHzduXKDPEQAAZEO0BwAAcGHAr8P3ChcubD/rsD3N0psrVy5p0KCBVfQAAMD9aA8AAOCSpH2+brrpJpk7d6488MADsmjRIundu7e9fujQIVumBwAAuB/tASBzl3EDgKD08MfFxcmLL74o119/vdSvX18aNmzovbt/6623XvVJAQCA7C9Q7QGd71+jRg27SaCbHueLL77w7j99+rT06NFDihcvLoUKFZKYmBg5ePCg3zH27dsnrVu3lgIFCkhUVJT07dtXzp8/H8BvCwBAiPTwP/TQQ9K4cWP57bffpGbNmt7XmzVrZnf5AQCA+wWqPVCmTBl544035OabbxaPxyPTpk2zxH+bNm2SqlWr2siB+fPny+zZs6VIkSLSs2dPmz6watUqe/+FCxcs2NccAqtXr7bz6dChg+TJk0def/31TPnuAADkBGEerVlxWceOHbMGxtGjRxmiCGTy8MUpnepl6bkAOZXb66ZixYrJiBEj7KbCddddJzNmzLCf1ffffy+VK1eWxMREyxegowHuu+8+OXDggJQoUcLKTJo0Sfr37y+///675M2bN02f6fZriuyJIf2hgzYO0isQ9VKGevhPnjxpd+KXLl1q8/QuXrzot//HH3/M0MkAAICcIzPaA9pbrz35emwd2r9x40Y5d+6cNG/e3FumUqVKUq5cOW/Ar4/Vq1f3BvsqOjpaunfvLtu2bUt1esGZM2ds821YAQDgJhkK+Dt37iwrVqyQ9u3bS6lSpSQsLCzwZwYAALK1QLYHtmzZYgG+ztfXefpz5syRKlWqyObNm62HvmjRon7lNbhPSkqyn/XRN9h39jv7UhMfHy+DBw/O8DkDaUUvPoAcFfDr0DmdS9eoUaPAnxEAAMgRAtkeqFixogX3Omzxk08+kY4dO9rNhMw0YMAA6dOnj18Pf9myZTP1MwEAyPYB/zXXXGNz6wAAQOgKZHtAe/F1mT9Vp04dWb9+vYwdO1YeeeQROXv2rBw5csSvl1+z9GuSPqWP69at8zu
ek8XfKZOSiIgI2wAAcKsMLcs3dOhQW4rnr7/+CtiJ6BxAHQrYq1evgC/Ds3z5cqldu7ZV6tqYSEhICNh5AwAQqjKjPeDQfAA6v16Df822r3kCHDt37rT631kGUB91SoDmEXAsWbLEEhzptAAAAEJVhnr4R44cKT/88IPNj9O1d7Ui9vXNN9+k63h6F//dd9+1NXh9BWIZnr1791qZbt26yYcffmgNBp1zqHMNNaEPAADImEC1B3RofcuWLS0R3/Hjxy0jv96sX7RokdX/sbGxNvReRxNoEP/ss89akK8J+1SLFi0ssNdcAsOHD7d5+wMHDrROA3rwAQChLEMBf9u2bQN2AidOnJB27drJ5MmTZdiwYd7XdQ7flClTrNJv2rSpvTZ16lRbhmfNmjVWyS9evFi2b98uX375pTU2atWqZb0NugzPoEGDbHigLstToUIFa5Qoff/XX38to0ePJuAHAOAqBKo9oD3zesNeb9xrgK8dABrs33PPPbZf6+xcuXLZSD/t9df6+5133vG+P3fu3DJv3jzLyq83AgoWLGg5AIYMGRKQ8wMAIKQC/ldffTVgJ6B337UHXpfb8Q34A7UMj5bxPYZTxnfqQHIs0wMAQNa1B/QG/+Xky5dPJkyYYFtqypcvLwsWLAjI+QAAENJz+JUmz3n//fdtGN6ff/7pHbr366+/pvkYM2fOtPfosjjJ6XC8QCzDk1oZDeJPnTqV4nnp+WgPg7ORsRcAgMxrDwAAgGzUw//dd99Zr7kGwz/99JN06dLF5tV9+umnlkRn+vTpVzzG/v375fnnn7ekOnrnPjthmR4AALKmPQAAALJZD78Gw506dZLdu3f7BeutWrWSlStXpukYOmRf5+xp9vzw8HDbdL3dcePG2c/aC+8sw+Mr+TI8ybP2J1+GJ7UymvQnf/78KZ6bJvjR/b4bAAAIfHsAAABks4Bfs+o//fTTl7z+t7/9zTuU/kqaNWtmS+hs3rzZu9WtW9cS+Dk/B2IZHi3jewynjHMMAACQMYFoDwAAgMyToSH92gOeUiK7Xbt2yXXXXZemYxQuXFiqVavm95pm1S1evLj39UAsw6PL8b399tvSr18/eeqpp2TZsmUya9YsW+4PAABkXCDaAwAAIJv18P/973+3pW40i74KCwuznnddDk+XzAkUXYbnvvvus2PeeeedNjxf5wUmX4ZHH/VGwBNPPGHL+vguw6NL8mlwr736NWvWtOX5NLkQS/IBAHB1sqo9AAAAMibM4/F40vumo0ePykMPPWRD+U6cOCGlS5e23nUNunVJHO2pdxPtvdCERPq9mc8PXL3YhPWp7pvSqV6WnguQU2WHuslt7YHscE0RevUeQgdtHASjXsrQkH79UO0xX7VqlXz77bdWyWvyveTr3QMAAPeiPQAAQPaW7oD/4sWLkpCQYEPrdQkeHb6nw+Z1uL0OFtDnAADA3WgPAADgsjn8WoHrfL3OnTvLr7/+KtWrV5eqVavKzz//bMvyPPDAA5l3pgAAIFugPQAAgAt7+PVOvq6rq8vcNWnSxG+fZr9v27atTJ8+3RLnAQAAd6I9AACAC3v4P/roI3n55ZcvqdxV06ZN5aWXXpIPP/wwkOcHAACyGdoDAAC4MOD/7rvv5N577011f8uWLS1pDwAAcC/aAwAAuDDg//PPP6VEiRKp7td9hw8fDsR5AQCAbIr2AAAALgz4L1y4IOHhqU/7z507t5w/fz4Q5wUAALIp2gMAALgwaZ9m5dXsuxERESnuP3PmTKDOCwAAZFO0BwAg/WIT1l92/5RO9bLsXBA60hXwd+zY8YplyMgLAIC70R4AAMCFAf/UqVMz70wAAECOQHsAAAAXzuEHAAAAAAA5AwE/AAAAAAAuRMAPAAAAAIALEfADAAAAAOBCBPwAAAAAALgQAT8AAAAAAC5EwA8AAAAAgAsR8AMAAAAA4EIE/AAAAAAAuFBQA/6JEydKjRo1JDIy0raGDRvKF1984d1/+vRp6dG
jhxQvXlwKFSokMTExcvDgQb9j7Nu3T1q3bi0FChSQqKgo6du3r5w/f96vzPLly6V27doSEREhN910kyQkJGTZdwQAAAAAIOQC/jJlysgbb7whGzdulA0bNkjTpk2lTZs2sm3bNtvfu3dv+fzzz2X27NmyYsUKOXDggDz44IPe91+4cMGC/bNnz8rq1atl2rRpFszHxcV5y+zdu9fKNGnSRDZv3iy9evWSzp07y6JFi4LynQEAAAAAyAphHo/HI9lIsWLFZMSIEfLQQw/JddddJzNmzLCf1ffffy+VK1eWxMREadCggY0GuO++++xGQIkSJazMpEmTpH///vL7779L3rx57ef58+fL1q1bvZ/x6KOPypEjR2ThwoVpOqdjx45JkSJF5OjRozYSAcDViU1Yn+q+KZ3qZem5ADkVdVPgcU2RGfUakFa0gZAZ9VK2mcOvvfUzZ86UkydP2tB+7fU/d+6cNG/e3FumUqVKUq5cOQv4lT5Wr17dG+yr6OhouzDOKAEt43sMp4xzjJScOXPGjuG7AQAAAACQkwQ94N+yZYvNz9f59d26dZM5c+ZIlSpVJCkpyXroixYt6ldeg3vdp/TRN9h39jv7LldGg/hTp06leE7x8fF2J8XZypYtG9DvDAAAAACA6wP+ihUr2tz6tWvXSvfu3aVjx46yffv2oJ7TgAEDbNiEs+3fvz+o5wMAAAAAQI4L+LUXXzPn16lTx3rWa9asKWPHjpWSJUtaMj6da+9Ls/TrPqWPybP2O8+vVEbnQOTPnz/Fc9LRBs7KAc4GAAAyh9b/9erVk8KFC9uKO23btpWdO3f6lQnUyj0AAISSoAf8yV28eNHm0OsNgDx58sjSpUu9+7Ty18pc5/grfdQpAYcOHfKWWbJkiQXoOi3AKeN7DKeMcwwAABBcuhKPBvNr1qyxOlpz+LRo0cLy+jgCsXIPAAChJjzYQ+dbtmxpifiOHz9uGfmXL19uS+bp3PnY2Fjp06ePZe7XIP7ZZ5+1QF0z9CttDGhg3759exk+fLjN1x84cKA1GrSXXmlegLffflv69esnTz31lCxbtkxmzZplmfsBAEDwJV81RwN17aHXBL533nmnTa+bMmWKtRN0CV81depUW7lHbxJou2Dx4sU2JfDLL7+0XD21atWSoUOH2mo9gwYNshGFQEaRhR9AThXUHn7tme/QoYPN42/WrJmsX7/egv177rnH9o8ePdqW3dNhe1rh6/D8Tz/91Pv+3Llzy7x58+xRbwQ88cQTdrwhQ4Z4y1SoUMGCe+0x0OkCI0eOlPfff98y9QMAgOxHA3ylN/xVoFbuSY5VeQAAbhfUHn69W385+fLlkwkTJtiWmvLly8uCBQsue5y7775bNm3alOHzBAAAWTe1r1evXtKoUSOpVq2avRaolXtSyh0wePDgTPomABDYkSRTOtXLsnOBe2S7OfwAACB06bS8rVu3ysyZMzP9s1iVBwDgdkHt4QcAAHD07NnTpuqtXLlSypQp433dd+Ue317+5Cv3rFu37rIr9ySn+X6cnD8AALgRPfwAACCoPB6PBftz5syx5Lqaf8dXoFbuAQAg1NDDDwAAgj6MXzPw//vf/5bChQt759zrij358+cP2Mo9AACEGgJ+AAAQVBMnTvQm2fWlS+916tTJu3JPrly5bOUeza6vGfjfeeedS1bu6d69u90IKFiwoHTs2NFv5R4AAEINAT8AAAj6kP4rCdTKPQAAhBLm8AMAAAAA4EIE/AAAAAAAuBABPwAAAAAALkTADwAAAACACxHwAwAAAADgQgT8AAAAAAC4EAE/AAAAAAAuRMAPAAAAAIALEfADAAAAAOBC4cE+AQAAACCYYhPWB/sUACBTEPADAAAAQA6+MTWlU70sPRfkHAzpBwAAAADAhQj4AQAAAABwIQJ+AAAAAABcKKgBf3x8vNSrV08KFy4sUVFR0rZtW9m5c6dfmdOnT0uPHj2kePHiUqhQIYmJiZGDBw/6ldm3b5+
0bt1aChQoYMfp27evnD9/3q/M8uXLpXbt2hIRESE33XSTJCQkZMl3BAAAAAAg5AL+FStWWDC/Zs0aWbJkiZw7d05atGghJ0+e9Jbp3bu3fP755zJ79mwrf+DAAXnwwQe9+y9cuGDB/tmzZ2X16tUybdo0C+bj4uK8Zfbu3WtlmjRpIps3b5ZevXpJ586dZdGiRVn+nQEAAAAAcH2W/oULF/o910Bde+g3btwod955pxw9elSmTJkiM2bMkKZNm1qZqVOnSuXKle0mQYMGDWTx4sWyfft2+fLLL6VEiRJSq1YtGTp0qPTv318GDRokefPmlUmTJkmFChVk5MiRdgx9/9dffy2jR4+W6OjooHx3AAAAAABCZg6/BviqWLFi9qiBv/b6N2/e3FumUqVKUq5cOUlMTLTn+li9enUL9h0axB87dky2bdvmLeN7DKeMc4zkzpw5Y+/33QAAAAAAyEmyTcB/8eJFG2rfqFEjqVatmr2WlJRkPfRFixb1K6vBve5zyvgG+85+Z9/lymggf+rUqRRzCxQpUsS7lS1bNsDfFgAAAAAAFw/p96Vz+bdu3WpD7YNtwIAB0qdPH+9zvTFA0A8AAJBzxSasD/YpAEBoBvw9e/aUefPmycqVK6VMmTLe10uWLGnJ+I4cOeLXy69Z+nWfU2bdunV+x3Oy+PuWSZ7ZX59HRkZK/vz5LzkfzeSvGwAAAAAAOVVQh/R7PB4L9ufMmSPLli2zxHq+6tSpI3ny5JGlS5d6X9Nl+3QZvoYNG9pzfdyyZYscOnTIW0Yz/mswX6VKFW8Z32M4ZZxjAAAAAADgNuHBHsavGfj//e9/S+HChb1z7nXevPa862NsbKwNr9dEfhrEP/vssxaoa4Z+pcv4aWDfvn17GT58uB1j4MCBdmynl75bt27y9ttvS79+/eSpp56ymwuzZs2S+fPnB/PrAwAAAADgzh7+iRMnWmb+u+++W0qVKuXdPv74Y28ZXTrvvvvuk5iYGFuqT4fnf/rpp979uXPntukA+qg3Ap544gnp0KGDDBkyxFtGRw5ocK+9+jVr1rTl+d5//32W5AMAAAAAuFZ4sIf0X0m+fPlkwoQJtqWmfPnysmDBgsseR28qbNq0KUPnCQAAAABATpNtluUDAAAAAACBQ8APAAAAAIALEfADAAAAAOBCBPwAAAAAALgQAT8AAAAAAC5EwA8AAAAAgAsR8AMAAAAA4EIE/AAAAAAAuBABPwAACKqVK1fK/fffL6VLl5awsDCZO3eu336PxyNxcXFSqlQpyZ8/vzRv3lx2797tV+bPP/+Udu3aSWRkpBQtWlRiY2PlxIkTWfxNACB7ik1Yf9kN7kXADwAAgurkyZNSs2ZNmTBhQor7hw8fLuPGjZNJkybJ2rVrpWDBghIdHS2nT5/2ltFgf9u2bbJkyRKZN2+e3UTo2rVrFn4LAACyn/BgnwAAAAhtLVu2tC0l2rs/ZswYGThwoLRp08Zemz59upQoUcJGAjz66KOyY8cOWbhwoaxfv17q1q1rZcaPHy+tWrWSt956y0YOAAAQiujhBwAA2dbevXslKSnJhvE7ihQpIvXr15fExER7ro86jN8J9pWWz5Url40ISM2ZM2fk2LFjfhsAAG5CDz8AAMi2NNhX2qPvS587+/QxKirKb394eLgUK1bMWyYl8fHxMnjw4Ew5bwDISszDR2ro4QcAACFpwIABcvToUe+2f//+YJ8SAAABRcAPAACyrZIlS9rjwYMH/V7X584+fTx06JDf/vPnz1vmfqdMSiIiIiyrv+8GAICbMKQfAABkWxUqVLCgfenSpVKrVi17Tefa69z87t272/OGDRvKkSNHZOPGjVKnTh17bdmyZXLx4kWb64/QwJBmALgUAT8AAAiqEydOyJ49e/wS9W3evNnm4JcrV0569eolw4YNk5tvvtluALzyyiuWeb9t27ZWvnLlynLvvfdKly5dbOm+c+fOSc+ePS2DPxn6AQChjIAfAAAE1YY
NG6RJkybe53369LHHjh07SkJCgvTr109OnjwpXbt2tZ78xo0b2zJ8+fLl877nww8/tCC/WbNmlp0/JiZGxo0bF5TvAwBuGiEzpVO9LD0XBBYBPwAACKq7775bPB5PqvvDwsJkyJAhtqVGRwPMmDEjk84QAICciaR9AAAAAAC4EAE/AAAAAAAuFNSAf+XKlXL//fdbQh0drjd37ly//Tq8Ly4uTkqVKiX58+eX5s2by+7du/3K6JI77dq1s6V0ihYtKrGxsZb8x9d3330nd9xxh831K1u2rAwfPjxLvh8AAAAAACEZ8GsCnpo1a8qECRNS3K+BuSbc0Yy7uvxOwYIFJTo6Wk6fPu0to8H+tm3bZMmSJTJv3jy7iaBJfRy6dE+LFi2kfPnytlzPiBEjZNCgQfLee+9lyXcEAAAAACDkkva1bNnStpRo7/6YMWNk4MCB0qZNG3tt+vTpUqJECRsJoEvt7Nixw7L0rl+/XurWrWtlxo8fL61atZK33nrLRg5o1t6zZ8/KBx98IHnz5pWqVavaUj+jRo3yuzHg68yZM7b53jQAAAAAACAnybZZ+nUN3qSkJBvG7yhSpIjUr19fEhMTLeDXRx3G7wT7Ssvrcjw6IuCBBx6wMnfeeacF+w4dJfDmm2/K4cOH5Zprrrnks+Pj42Xw4MFZ8C0BAABwtcuGAQByWNI+DfaV9uj70ufOPn2Miory2x8eHm5L8/iWSekYvp+R3IABA+To0aPebf/+/QH8ZgAAAAAAhHAPfzBFRETYBgAAAABATpVtA/6SJUva48GDBy1Lv0Of16pVy1vm0KFDfu87f/68Ze533q+P+h5fznOnDAAAAIKPYfsAECJD+itUqGAB+dKlS/2S5+nc/IYNG9pzfTxy5Ihl33csW7ZMLl68aHP9nTKauf/cuXPeMprRv2LFiinO3wcAAAAAwA2C2sN/4sQJ2bNnj1+iPs2gr3Pwy5UrJ7169ZJhw4bJzTffbDcAXnnlFcu837ZtWytfuXJluffee6VLly62dJ8G9T179rSEflpOPf7445aALzY2Vvr37y9bt26VsWPHyujRoyW73sGe0qlelp4LACDreij5Nx4AAIREwL9hwwZp0qSJ93mfPn3ssWPHjpKQkCD9+vWTkydP2vJ52pPfuHFjW4YvX7583vfosnsa5Ddr1syy88fExMi4ceP8MvsvXrxYevToIXXq1JFrr71W4uLiUl2SDwAAAAAANwhqwH/33XeLx+NJdX9YWJgMGTLEttToaIAZM2Zc9nNq1Kgh//nPf67qXAEAAAAAyEmybdI+AAAAuAtJ+QAga2XbpH0AAAAAACDjCPgBAAAAAHAhAn4AAAAAAFyIOfwAAAAAgBSx3GzORg8/AAAAAAAuRMAPAAAAAIALMaQfAAAAAJAhDPnP3ujhBwAAAADAhQj4AQAAAABwIQJ+AAAAAABciDn8AAAAyJK5vACArEUPPwAAAAAALkQPPwAAANKMXnwAgfo3gwz+mY+AHwAAAACQ5VjSL/MxpB8AAAAAABci4AcAAAAAwIUI+AEAAAAAcCECfgAAAAAAXIikfQAAACGELPsAQuHfKxL+hWDAP2HCBBkxYoQkJSVJzZo1Zfz48XLbbbcF+7QAAECAUNf/H4J6AKGOFQBCLOD/+OOPpU+fPjJp0iSpX7++jBkzRqKjo2Xnzp0SFRUV7NMDAABXKZTqegJ6AEBahEzAP2rUKOnSpYs8+eST9lwbA/Pnz5cPPvhAXnrppWCfHgAAuEpuqusJ6AEg5/47PSUbjR4IiYD/7NmzsnHjRhkwYID3tVy5cknz5s0lMTHxkvJnzpyxzXH06FF7PHbsWODO6dSJVPcF8nOA7Ii/f7jZ5f6+A/k37hzH4/EE5HihVtdnRX3f48ONATkOACDwjl3h3/qr+Tc8O9X1IRHw//e//5ULFy5IiRIl/F7X599///0l5ePj42Xw4MGXvF62bFnJCv96Jks+BsiW+PuH2wX6b/z48eN
SpEgRCXXpreuzQ30PAHBnm/Nf2aiuD4mAP720d0DnADouXrwof/75pxQvXlzCwsICcqdGGxP79++XyMjIqz4ekJPw949QFsi/f73brw2A0qVLB+z8Qs3V1vf8e3b1uIZXj2t49biGV49rmDnXMBB1fUgE/Ndee63kzp1bDh486Pe6Pi9ZsuQl5SMiImzzVbRo0YCfl/4i+R8CoYq/f4SyQP3907Of8bo+kPU9/55dPa7h1eMaXj2u4dXjGgb+Gl5tXZ9LQkDevHmlTp06snTpUr+7+Pq8YcOGQT03AABw9ajrAQAI0R5+pUP2OnbsKHXr1rX1eHWpnpMnT3oz+QIAgJyNuh4AgBAN+B955BH5/fffJS4uTpKSkqRWrVqycOHCS5L7ZAUdPvjqq69eMowQCAX8/SOU8ffvrrqe3+fV4xpePa7h1eMaXj2uYfa9hmEe1vMBAAAAAMB1QmIOPwAAAAAAoYaAHwAAAAAAFyLgBwAAAADAhQj4s8jdd98tvXr1CvZpAAAAAABCBAE/AABAKiZMmCDXX3+95MuXT+rXry/r1q27bPnZs2dLpUqVrHz16tVlwYIFEurScw0nT54sd9xxh1xzzTW2NW/e/IrXPBSk9+/QMXPmTAkLC5O2bdtKqEvvNTxy5Ij06NFDSpUqZVnTb7nllpD//zm911CXRq1YsaLkz59fypYtK71795bTp09LqFq5cqXcf//9Urp0afv/cu7cuVd8z/Lly6V27dr2N3jTTTdJQkJCuj+XgB8AACAFH3/8sfTp08eWSfrmm2+kZs2aEh0dLYcOHUqx/OrVq+Wxxx6T2NhY2bRpkwVZum3dulVCVXqvoTZu9Rp+9dVXkpiYaEFCixYt5Ndff5VQld5r6Pjpp5/kxRdftBsooS691/Ds2bNyzz332DX85JNPZOfOnXYz6m9/+5uEqvRewxkzZshLL71k5Xfs2CFTpkyxY7z88ssSqk6ePGnXTW+cpMXevXuldevW0qRJE9m8ebONFu/cubMsWrQofR+sy/Ih8911112eHj162BYZGekpXry4Z+DAgZ6LFy8G+9SALPHFF194GjVq5ClSpIinWLFintatW3v27NkT7NMCssyFCxc8b775pufGG2/05M2b11O2bFnPsGHDgn1auIzbbrvN6m3f32Hp0qU98fHxKZb/xz/+Yf+2+apfv77n6aef9oSq9F7D5M6fP+8pXLiwZ9q0aZ5QlZFrqNft9ttv97z//vuejh07etq0aeMJZem9hhMnTvTccMMNnrNnz2bhWbrrGmrZpk2b+r3Wp08fawvC49EwfM6cOZct069fP0/VqlX9XnvkkUc80dHR6foseviz0LRp0yQ8PNyGv4wdO1ZGjRol77//frBPC8iyu5p6Z3jDhg2ydOlSyZUrlzzwwANy8eLFYJ8akCUGDBggb7zxhrzyyiuyfft26/0oUaJEsE8LqdAevo0bN9qQcof+u6XPtec5Jfq6b3mlPWCplXe7jFzD5P766y85d+6cFCtWTEJRRq/hkCFDJCoqykabhLqMXMPPPvtMGjZsaEP69d/patWqyeuvvy4XLlyQUJSRa3j77bfbe5xh/z/++KNNiWjVqlWWnXdOlxigOiU8wOeFy9BhaaNHj7Y5GzqfZcuWLfa8S5cuwT41INPFxMT4Pf/ggw/kuuuus8BHK1LAzY4fP243et9++23p2LGjvXbjjTdK48aNg31qSMV///tfa9wnvymjz7///vsU35OUlJRieX09FGXkGibXv39/m++avNEbKjJyDb/++msbPq1DgJGxa6jB6bJly6Rdu3YWpO7Zs0eeeeYZu/mkQ9RDTUau4eOPP27v03pOO7TPnz8v3bp1C+kh/emVWp1y7NgxOXXqlOVGSAt6+LNQgwYNLNh36J3D3bt3h+zdQoQW/VvXeZk33HCDREZGWtIXtW/fvmCfGpDpdP7imTNnpFmzZsE+FSDH0BExmnRuzpw5liQMabu52L59e5tvfu2
11wb7dHIsHX2oIyTee+89qVOnjjzyyCPyz3/+UyZNmhTsU8sxNB+Hjop45513bM7/p59+KvPnz5ehQ4cG+9RCDj38ALKEZiUtX768NUK0t0YrU+3Z12FigNul9S48sg8NlnLnzi0HDx70e12flyxZMsX36OvpKe92GbmGjrfeessC/i+//FJq1KghoSq91/CHH36wRHNa5zqcqXM6rVSTz+noolCSkb9DzcyfJ08ee5+jcuXK1uOq7Za8efNKKMnINdTpa3rzSZPMKV21RKd3du3a1W6e6JQAXF5qdYp2nKWnXcGVzkJr1671e75mzRq5+eab/f4xAdzojz/+sEbGwIEDrYdTK83Dhw8H+7SALKP/1mvlrPkrkDNog1579nx/Zxo46XMdoZcSfT3573jJkiWplne7jFxDNXz4cOsFXLhwodStW1dCWXqvoS4JqVNGdTi/s/3973/3ZvnW6aWhJiN/h40aNbJh/L55hnbt2mU3AkIt2M/oNdT8G8mDeifm+b+cdbiSgNUpGUoriAxl6S9UqJCnd+/enu+//94zY8YMT8GCBT2TJk0K9qkBmU4zuerKFE888YRn9+7dnqVLl3rq1auXpgylgFsMGjTIc80111i2cV2hIjEx0TJoI/uaOXOmJyIiwpOQkODZvn27p2vXrp6iRYt6kpKSbH/79u09L730krf8qlWrPOHh4Z633nrLs2PHDs+rr77qyZMnj2fLli2eUJXea/jGG2/YKhaffPKJ57fffvNux48f94Sq9F7D5MjSn/5ruG/fPlsdomfPnp6dO3d65s2b54mKigrplVXSew313z+9hh999JHnxx9/9CxevNhWqdHVTELV8ePHPZs2bbJN28CjRo2yn3/++Wfbr9dPr6NDr1uBAgU8ffv2tTplwoQJnty5c3sWLlyYrs8l4M/CgP+ZZ57xdOvWzZbl00bfyy+/zLJ8CBlLlizxVK5c2SqLGjVqeJYvX07Aj5C78aWNxfLly1sQWK5cOc/rr78e7NPCFYwfP95+VxqE6rJUa9as8avbNZjyNWvWLM8tt9xi5XU5pfnz53tCXXquof7/oXVD8k2Dh1CW3r9DXwT8GbuGq1evtmU1td2iS/S99tprttxhKEvPNTx37pzd6NYgP1++fLYUrcZChw8f9oSqr776KsV/35zrpo96HZO/p1atWnbN9e9w6tSp6f7cMP1P+sYEAAAAAACA7I45/AAAAAAAuBABPwAAAAAALkTADwAAAACACxHwAwAAAADgQgT8AAAAAAC4EAE/AAAAAAAuRMAPAAAAAIALEfADAAAAAOBCBPwAAiIhIUGKFi0asOMtX75cwsLC5MiRIwE7JgAAbuDxeKRr165SrFgxqyu1/u3Vq1ewTwtANkTAD+RAgwYNklq1akl28sgjj8iuXbuCfRoAALjewoUL7Ub7vHnz5LfffpNq1aoF/DPuvvtubiIALhAe7BMAkPOdO3dO8ufPb5ubnD17VvLmzRvs0wAAwM8PP/wgpUqVkttvv92eh4e7t0lPXQxcHXr4gSC5ePGiDB8+XG666SaJiIiQcuXKyWuvvWb7+vfvL7fccosUKFBAbrjhBnnllVcsqFZ6R3/w4MHy7bff2jA+3fQ1pcPfO3fuLNddd51ERkZK06ZNrZyvYcOGSVRUlBQuXNjKvvTSS36jBfS8hgwZImXKlLHz0n3ak+D46aef7DM//vhjueuuuyRfvnzy4Ycfpjik//PPP5d69epZmWuvvVYeeOAB777/+Z//kbp169p5lCxZUh5//HE5dOhQhq6l89lz586Vm2++2T4vOjpa9u/f79c4atOmjZQoUUIKFSpk5/Xll1/6Hef666+XoUOHSocOHez66XDJK/0+fEdcfPDBB/Z71OM/88wzcuHCBfsd6/fTa+78fgEAyKhOnTrJs88+K/v27bP6WOuu5A4fPmx12TXXXGN1V8uWLWX37t3e/X/88Yc89thj8re//c32V69eXT766CO/z1i
xYoWMHTvW29bQ+j8tU/Hmz58vNWrUsLq4QYMGsnXr1jR/rjOyoGfPnja6QNsOWp+rUaNGWfmCBQtK2bJlrZ49ceLEJW0BHfVQsWJFO/5DDz0kf/31l0ybNs2uk16P5557zupnIFQQ8ANBMmDAAHnjjTcseNy+fbvMmDHDglGlQbBWXPq6VraTJ0+W0aNHe4fOv/DCC1K1alUbxqebvqYefvhhC5q/+OIL2bhxo9SuXVuaNWsmf/75p+3XwFyDzjfffNP2a3A6ceJEv/PSzxs5cqS89dZb8t1331lF+/e//92voaD0RsHzzz8vO3bs8FbGvrTC1wC/VatWsmnTJlm6dKncdttt3v0aMGtwrTckNFDXhoQ2MDJKK3T9btOnT5dVq1bZzY9HH33Uu18bBXoueh56Pvfee6/cf//91mDypd+7Zs2aVkZ/N1f6ffjeUNDrrjdHtPEyZcoUad26tfzyyy/WaNJrPnDgQFm7dm2GvyMAAFoPOTfmtQ2wfv36S8pofbphwwb57LPPJDEx0eb8ax3o3Kw+ffq01KlTx+pqDcj1Bnf79u1l3bp13s9o2LChdOnSxdvW0CA7Lfr27WvtCD0v7YDQujatn+vQAF179bU+nzRpkr2WK1cuGTdunGzbts32L1u2TPr163dJW0DLzJw50+pjvQmhbZEFCxbYpp0N7777rnzyyScZvPpADuQBkOWOHTvmiYiI8EyePDlN5UeMGOGpU6eO9/mrr77qqVmzpl+Z//znP57IyEjP6dOn/V6/8cYbPe+++679XL9+fU+PHj389jdq1MjvWKVLl/a89tprfmXq1avneeaZZ+znvXv3evSfjjFjxviVmTp1qqdIkSLe5w0bNvS0a9fOk1br16+34x4/ftyef/XVV/b88OHDV3yvfraWXbNmjfe1HTt22Gtr165N9X1Vq1b1jB8/3vu8fPnynrZt22bo91GgQAH7vTqio6M9119/vefChQve1ypWrOiJj4+/4vEBALic0aNHW53luOuuuzzPP/+8/bxr1y6r/1atWuXd/9///teTP39+z6xZs1I9ZuvWrT0vvPBCisdMC6fenjlzpve1P/74wz73448/Ttfn3nrrrVf8vNmzZ3uKFy9+SVtgz5493teefvppq5+dtoVTP+vrQKhw74QfIBvTXvEzZ85Y73tKdLi83qHWXmPtmT5//rwNMb8c7SnXssWLF/d7/dSpU3YctXPnThsC50t73fUuuTp27JgcOHBAGjVq5FdGnyefGqDD8S9n8+bN1jOQGh1hoEPh9bg69FCnEijtca9SpYqkl85f1GH6jkqVKtnQPr3W+h312ujnaa+C9lToNdVrk7yHP6XvlZbfhw4V1JEADh2tkTt3buuR8H0to9MWAABIC633tE6sX7++9zVtG+gwd92ndEj766+/LrNmzZJff/3V5slru0SHwV8tHRng0FUEMvK5OgogOZ2GFx8fL99//721V7Qu1hED2qvvvF8fb7zxRr96V+tnnWrn+xp1MUIJAT8QBJdLbqdD79q1a2fz9HWofJEiRWxomg6PuxwNRDWBjw5fSy6Qy+U5dA5dRr/jyZMn7bvpptMMdMifBt76XCv/zPDiiy/KkiVLbMi+5k3Q89O5fck/L/n3SuvvI0+ePH7PdR5jSq85NzYAAAiWESNG2LD9MWPGeOfF65z5zKqD0/u5yetinfZ33333Sffu3W36nt5I+PrrryU2Ntbe6wT81MXApQj4gSDQxHIacOp8ck2c52v16tVSvnx5+ec//+l97eeff/Yro/Pakiec0fn6SUlJdlc/pQQ+Su+y65w6TeTj8J37p73WpUuXtjlzmpDPoc9959+nhSbs0e/35JNPXrJP785r4h7NYeDMCdS5hldD7/TrMZzz1NEMOo+/cuXK3u+gcxqdxIF6g+RKCYjS+vsAACC70HpP60TNGeNk8dc6V+tFZwSd1omayPaJJ56w5xoA69K6viPsUmprpMWaNWssR5DSEXx
6XN+6+Eqfm9qoQC2rN9udkXM6SgDAlRHwA0GgmWs187smm9EKVYfM//7775aIRm8GaG+39iLrEHUdgj5nzhy/92tAv3fvXhs2r0l7dCh58+bNbRhd27ZtLTO8ZpXX4flO8jwdqq5ZfXWYvf6sjQAdqq6J+TTzvG+ynVdffdWGxGnm+alTp9rnaE98eugxdMqCHkeT52njQxPm6PfWhoB+7/Hjx0u3bt0scY8m8Lsaegdfv58OvdebHprhV7MDOzcA9Lp++umnljxI7+5rQr603OFPy+8DAIDsQustDaq1vtcEddpG0ES7mhlfX3fKaOI6vamtmes1A/7Bgwf9Am9ta+hNA705rkPitVfdd5paajShoE4h0KHzerNcM+1r2yStn5sSHZmnif+03aD1uG8yPwCXR5Z+IEg04NRs+3FxcXbnWzPt65wyzYjfu3dvC1g14NZK0ckW74iJibEs802aNLHh8JoVXoNYDajvvPNO61XXgF8Dbe2NdrL/69B0XR1Ah7friAC9aaC93noDwqHL1fTp08fOTYfbaZZbzfKrlXR66LI6s2fPtvfq99AlAp0svHrOmvVe92slrz39OtT+auhwPr2ZoMv76Q0UbZzoDQ2HNiq0caE3OrSxoMPz9RpcSVp+HwAAZCd6s17nwesweO0M0Cz92kZwhrfrqjFaB2pdqPW1Lh/rBOUObStoLhqtp52pd2mhdbqu4qOfryMPdYlevcmf1s9Nia6eo/W4rnhTrVo164TQ+fwArixMM/eloRwAl7rnnnuswtWlanIqvXmgcwB1CD8AAMh6mkNIOyJ0GH9m5A4CkDEM6QdCiGay1SFwemdd79rryADNeqvJ7AAAAAC4C0P6gRDiO+xfh9rpMLv//d//tfn/2VnLli1tiH5Kmy7vAwAAMpfm3EmtLtZ9ALInhvQDyPZ0rd5Tp06luE+TCOkGAAAyj+YZOnbsWIr7dJWfqKioLD8nAFdGwA8AAAAAgAsxpB8AAAAAABci4AcAAAAAwIUI+AEAAAAAcCECfgAAAAAAXIiAHwAAAAAAFyLgBwAAAADAhQj4AQAAAAAQ9/l/RrxKrK6AP2UAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\"\"\"\n", + "This example demonstrates the full capabilities of NePS Spaces\n", + "by defining a neural network architecture using PyTorch modules.\n", + "It showcases how to interact with the NePS Spaces API to create,\n", + "sample and evaluate a neural network pipeline.\n", + "It also demonstrates how to convert the pipeline to a callable\n", + "and how to run NePS with the defined pipeline and space.\n", + "\"\"\"\n", + "\n", + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn\n", + "import neps\n", + "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", + "from neps.space.neps_spaces import sampling\n", + "from neps.space.neps_spaces import neps_space\n", + "\n", + "# Define the NEPS space for the neural network architecture\n", + "class SimpleSpace(PipelineSpace):\n", + " int_param1 = neps.Integer(1,100, prior=50, prior_confidence=\"low\")\n", + " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\")\n", + " int_param3 = neps.Integer(1,100, prior=50, prior_confidence=\"high\")\n", + " int_param4 = neps.Integer(1,4, prior=1.5, prior_confidence=\"low\")\n", + " categorical_param = Categorical((\"a\", \"b\", \"c\"), prior=0, prior_confidence=\"high\")\n", + " float_param = neps.Float(0, 1.0, prior=0.5, prior_confidence=\"high\")\n", + "\n", + "# Sampling and printing one random configuration of the pipeline\n", + "pipeline = SimpleSpace()\n", + "random_sampler = sampling.RandomSampler({})\n", + "sampler = sampling.PriorOrFallbackSampler(fallback_sampler=random_sampler, always_use_prior=False)\n", + "\n", + "values = {\"int_param1\": [],\n", + " \"int_param2\": [], \"int_param3\": [], \"int_param4\": [], \n", + " \"categorical_param\": [], \"float_param\": []}\n", + "for i in range(10000):\n", + " resolved_pipeline, resolution_context = neps_space.resolve(pipeline,domain_sampler=sampler)\n", + 
"\n", + " # s = resolved_pipeline.int_param1\n", + " # print(resolved_pipeline.get_attrs())\n", + " values[\"int_param1\"].append(resolved_pipeline.int_param1)\n", + " values[\"int_param2\"].append(resolved_pipeline.int_param2)\n", + " values[\"int_param3\"].append(resolved_pipeline.int_param3)\n", + " values[\"int_param4\"].append(resolved_pipeline.int_param4)\n", + " values[\"categorical_param\"].append(resolved_pipeline.categorical_param)\n", + " values[\"float_param\"].append(resolved_pipeline.float_param)\n", + "\n", + "# Plot the distribution of the sampled values, each in a separate subplot\n", + "import matplotlib.pyplot as plt\n", + "_, axs = plt.subplots(3, 2, figsize=(12, 12))\n", + "axs = axs.flatten()\n", + "for i, (param_name, param_values) in enumerate(values.items()):\n", + " axs[i].hist(param_values,align='mid',bins=50,alpha=0.7)\n", + " axs[i].set_title(f'Distribution of {param_name}')\n", + " axs[i].set_xlabel(param_name)\n", + " axs[i].set_ylabel('Density')\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59280930", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:neps.api:Starting neps.run using root directory results/fidelity_ignore_test\n", + "WARNING:neps.optimizers.algorithms:Warning: No priors are defined in the search space, priorband will sample uniformly. 
Consider using hyperband instead.\n", + "INFO:neps.runtime:Overwriting optimization directory 'results\\fidelity_ignore_test' as `overwrite_optimization_dir=True`.\n", + "INFO:neps.runtime:Launching NePS\n", + "INFO:neps.runtime:Worker '20016-2025-07-11T18:31:10.131893+00:00' sampled new trial: 1_0.\n", + "INFO:neps.state.pipeline_eval:Successful evaluation of '1_0': 68.\n", + "INFO:neps.runtime:Worker '20016-2025-07-11T18:31:10.131893+00:00' evaluated trial: 1_0 as State.SUCCESS.\n", + "INFO:neps.runtime:The total number of evaluations has reached the maximum allowed of `self.settings.max_evaluations_total=1`. To allow more evaluations, increase this value or use a different stopping criterion.\n", + "INFO:neps.api:The post run summary has been created, which is a csv file with the output of all data in the run.\n", + "You can find a full dataframe at: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\results\\fidelity_ignore_test\\summary\\full.csv.\n", + "You can find a quick summary at: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\results\\fidelity_ignore_test\\summary\\short.csv.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Configs: 1\n", + "\n", + " success: 1\n", + "\n", + "# Best Found (config 1_0):\n", + "\n", + " objective_to_minimize: 68.0\n", + " config: int_param1\n", + " (68)\n", + " \t01 :: 68\n", + " config: fidelity_param\n", + " (1)\n", + " \t01 :: 1\n", + " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\results\\fidelity_ignore_test\\configs\\config_1_0\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn\n", + "import neps\n", + "from neps.space.neps_spaces.parameters import Pipeline, Operation, Categorical, Resampled\n", + "from neps.space.neps_spaces import sampling\n", + "from neps.space.neps_spaces import neps_space\n", + "from functools import partial\n", + "\n", + "# Define the NEPS space for the neural network 
architecture\n", + "class SimpleSpace(PipelineSpace):\n", + " int_param1 = neps.Integer(1,100)#, prior=50, prior_confidence=\"low\")\n", + " # int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\")\n", + " # int_param3 = neps.Integer(1,100, prior=50, prior_confidence=\"high\")\n", + " # int_param4 = neps.Integer(1,3, prior=2, prior_confidence=\"low\")\n", + " # categorical_param = Categorical((\"a\", \"b\", int_param1))\n", + " # float_param = neps.Float(0, 1.0, prior=0.5, prior_confidence=\"high\")\n", + " fidelity_param = neps.Fidelity(neps.Integer(1, 10))\n", + "\n", + "# Sampling and printing one random configuration of the pipeline\n", + "pipeline = SimpleSpace()\n", + "\n", + "def evaluate_pipeline(int_param1,**_args):#, fidelity_param, categorical_param):\n", + " return int_param1\n", + "\n", + "for i in range(1):\n", + " # resolved_pipeline, resolution_context = neps_space.resolve(pipeline,domain_sampler=sampler)\n", + " new_rs=neps.algorithms.NePSRandomSearch(pipeline,ignore_fidelity=True)\n", + " # old_rs=neps.algorithms.random_search(pipeline,ignore_fidelity=True)\n", + " # print(new_rs({},None))\n", + "\n", + " # s = resolved_pipeline.int_param1\n", + " # print(resolved_pipeline.get_attrs())\n", + " import logging\n", + "\n", + " logging.basicConfig(level=logging.INFO)\n", + " neps.run(evaluate_pipeline,pipeline,root_directory=\"results/fidelity_ignore_test\",overwrite_working_directory=True,optimizer=neps.algorithms.neps_priorband, max_evaluations_total=1)\n", + " neps.status(\"results/fidelity_ignore_test\",print_summary=True, pipeline_space_variables=(SimpleSpace(),[\"int_param1\", \"fidelity_param\"]))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "neural-pipeline-search (3.13.1)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": 
"python", + "pygments_lexer": "ipython3", + "version": "3.13.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/neps_examples/basic_usage/pytorch_nn_example.py b/neps_examples/basic_usage/pytorch_nn_example.py index 80baebbdb..5fab96a46 100644 --- a/neps_examples/basic_usage/pytorch_nn_example.py +++ b/neps_examples/basic_usage/pytorch_nn_example.py @@ -11,9 +11,15 @@ import torch import torch.nn as nn import neps -from neps.space.neps_spaces.parameters import Pipeline, Operation, Categorical, Resampled +from neps.space.neps_spaces.parameters import ( + PipelineSpace, + Operation, + Categorical, + Resampled, +) from neps.space.neps_spaces import neps_space + # Define the neural network architecture using PyTorch as usual class ReLUConvBN(nn.Module): def __init__(self, out_channels, kernel_size, stride, padding): @@ -46,11 +52,32 @@ def forward(self, x): # Define the NEPS space for the neural network architecture -class NN_Space(Pipeline): +class NN_Space(PipelineSpace): _id = Operation(operator=Identity) - _three = Operation(operator=nn.Conv2d,kwargs={"in_channels":3, "out_channels":3, "kernel_size":3, "stride":1, "padding":1}) - _one = Operation(operator=nn.Conv2d,kwargs={"in_channels":3, "out_channels":3, "kernel_size":1, "stride":1, "padding":0}) - _reluconvbn = Operation(operator=ReLUConvBN, kwargs={"out_channels":3, "kernel_size":3, "stride":1, "padding":1}) + _three = Operation( + operator=nn.Conv2d, + kwargs={ + "in_channels": 3, + "out_channels": 3, + "kernel_size": 3, + "stride": 1, + "padding": 1, + }, + ) + _one = Operation( + operator=nn.Conv2d, + kwargs={ + "in_channels": 3, + "out_channels": 3, + "kernel_size": 1, + "stride": 1, + "padding": 0, + }, + ) + _reluconvbn = Operation( + operator=ReLUConvBN, + kwargs={"out_channels": 3, "kernel_size": 3, "stride": 1, "padding": 1}, + ) _O = Categorical(choices=(_three, _one, _id)) @@ -91,6 +118,7 @@ class NN_Space(Pipeline): args=Resampled(_model_ARGS), ) + # Sampling and printing one random 
configuration of the pipeline pipeline = NN_Space() resolved_pipeline, resolution_context = neps_space.resolve(pipeline) @@ -106,6 +134,7 @@ class NN_Space(Pipeline): print("\n\nConfig string:\n") print(pretty_config) + # Defining the pipeline, using the model from the NN_space space as callable def evaluate_pipeline(model: nn.Sequential): x = torch.ones(size=[1, 3, 220, 220]) @@ -124,4 +153,8 @@ def evaluate_pipeline(model: nn.Sequential): max_evaluations_total=5, overwrite_working_directory=True, ) -neps.status("results/neps_spaces_nn_example", print_summary=True, pipeline_space_variables=(pipeline_space, ["model"])) +neps.status( + "results/neps_spaces_nn_example", + print_summary=True, + pipeline_space_variables=(pipeline_space, ["model"]), +) diff --git a/neps_examples/convenience/logging_additional_info.py b/neps_examples/convenience/logging_additional_info.py index 5487fa9d8..c290843e4 100644 --- a/neps_examples/convenience/logging_additional_info.py +++ b/neps_examples/convenience/logging_additional_info.py @@ -21,7 +21,7 @@ def evaluate_pipeline(float1, float2, categorical, integer1, integer2): } -class PipelineSpace(neps.Pipeline): +class HPOSpace(neps.PipelineSpace): float1 = neps.Float(min_value=0, max_value=1) float2 = neps.Float(min_value=-10, max_value=10) categorical = neps.Categorical(choices=(0, 1)) @@ -32,7 +32,7 @@ class PipelineSpace(neps.Pipeline): logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), root_directory="results/logging_additional_info", max_evaluations_total=5, ) diff --git a/neps_examples/convenience/neps_tblogger_tutorial.py b/neps_examples/convenience/neps_tblogger_tutorial.py index e0a5bd3d7..338f81be4 100644 --- a/neps_examples/convenience/neps_tblogger_tutorial.py +++ b/neps_examples/convenience/neps_tblogger_tutorial.py @@ -210,13 +210,13 @@ def training( # Design the pipeline search spaces. 
-def pipeline_space() -> neps.Pipeline: - class PipelineSpace(neps.Pipeline): +def pipeline_space() -> neps.PipelineSpace: + class HPOSpace(neps.PipelineSpace): lr = neps.Float(min_value=1e-5, max_value=1e-1, log=True) optim = neps.Categorical(choices=("Adam", "SGD")) weight_decay = neps.Float(min_value=1e-4, max_value=1e-1, log=True) - return PipelineSpace() + return HPOSpace() ############################################################# diff --git a/neps_examples/convenience/running_on_slurm_scripts.py b/neps_examples/convenience/running_on_slurm_scripts.py index 04cf845e2..a113bfbd7 100644 --- a/neps_examples/convenience/running_on_slurm_scripts.py +++ b/neps_examples/convenience/running_on_slurm_scripts.py @@ -50,7 +50,7 @@ def evaluate_pipeline_via_slurm( return validation_error -class PipelineSpace(neps.Pipeline): +class HPOSpace(neps.PipelineSpace): optimizer = neps.Categorical(choices=("sgd", "adam")) learning_rate = neps.Float(min_value=10e-7, max_value=10e-3, log=True) @@ -58,7 +58,7 @@ class PipelineSpace(neps.Pipeline): logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline_via_slurm, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), root_directory="results/slurm_script_example", max_evaluations_total=5, ) diff --git a/neps_examples/convenience/working_directory_per_pipeline.py b/neps_examples/convenience/working_directory_per_pipeline.py index 96864d6d3..c3ebb7627 100644 --- a/neps_examples/convenience/working_directory_per_pipeline.py +++ b/neps_examples/convenience/working_directory_per_pipeline.py @@ -18,7 +18,7 @@ def evaluate_pipeline(pipeline_directory: Path, float1, categorical, integer1): return objective_to_minimize -class PipelineSpace(neps.Pipeline): +class HPOSpace(neps.PipelineSpace): float1 = neps.Float(min_value=0, max_value=1) categorical = neps.Categorical(choices=(0, 1)) integer1 = neps.Integer(min_value=0, max_value=1) @@ -27,7 +27,7 @@ class PipelineSpace(neps.Pipeline): 
logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), root_directory="results/working_directory_per_pipeline", max_evaluations_total=5, ) diff --git a/neps_examples/efficiency/expert_priors_for_hyperparameters.py b/neps_examples/efficiency/expert_priors_for_hyperparameters.py index 565f86e61..d94e0accd 100644 --- a/neps_examples/efficiency/expert_priors_for_hyperparameters.py +++ b/neps_examples/efficiency/expert_priors_for_hyperparameters.py @@ -22,7 +22,7 @@ def evaluate_pipeline(some_float, some_integer, some_cat): # neps uses the default values and a confidence in this default value to construct a prior # that speeds up the search -class PipelineSpace(neps.Pipeline): +class HPOSpace(neps.PipelineSpace): some_float = ( neps.Float( min_value=1, @@ -52,7 +52,7 @@ class PipelineSpace(neps.Pipeline): logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), root_directory="results/user_priors_example", max_evaluations_total=15, ) diff --git a/neps_examples/efficiency/multi_fidelity.py b/neps_examples/efficiency/multi_fidelity.py index 6b64bb4a5..c4a091832 100644 --- a/neps_examples/efficiency/multi_fidelity.py +++ b/neps_examples/efficiency/multi_fidelity.py @@ -83,7 +83,7 @@ def evaluate_pipeline( ) -class PipelineSpace(neps.Pipeline): +class HPOSpace(neps.PipelineSpace): learning_rate = neps.Float(min_value=1e-4, max_value=1e0, log=True) epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=10)) @@ -91,7 +91,7 @@ class PipelineSpace(neps.Pipeline): logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), root_directory="results/multi_fidelity_example", # Optional: Do not start another evaluation after <=50 epochs, corresponds to cost # field above. 
diff --git a/neps_examples/efficiency/multi_fidelity_and_expert_priors.py b/neps_examples/efficiency/multi_fidelity_and_expert_priors.py index c7dd5d6d5..26e266d5f 100644 --- a/neps_examples/efficiency/multi_fidelity_and_expert_priors.py +++ b/neps_examples/efficiency/multi_fidelity_and_expert_priors.py @@ -12,7 +12,7 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): return objective_to_minimize -class PipelineSpace(neps.Pipeline): +class HPOSpace(neps.PipelineSpace): float1 = neps.Float( min_value=1, max_value=1000, @@ -38,7 +38,7 @@ class PipelineSpace(neps.Pipeline): logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), root_directory="results/multifidelity_priors", max_evaluations_total=25, # For an alternate stopping method see multi_fidelity.py ) diff --git a/neps_examples/efficiency/pytorch_lightning_ddp.py b/neps_examples/efficiency/pytorch_lightning_ddp.py index 031c7d33d..99b2b987b 100644 --- a/neps_examples/efficiency/pytorch_lightning_ddp.py +++ b/neps_examples/efficiency/pytorch_lightning_ddp.py @@ -85,7 +85,7 @@ def evaluate_pipeline(lr=0.1, epoch=20): return trainer.logged_metrics["val_loss"].item() -class PipelineSpace(neps.Pipeline): +class HPOSpace(neps.PipelineSpace): lr = neps.Float(min_value=0.001, max_value=0.1, log=True, prior=0.01) epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=3)) @@ -93,7 +93,7 @@ class PipelineSpace(neps.Pipeline): logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), root_directory="results/pytorch_lightning_ddp", max_evaluations_total=5, ) diff --git a/neps_examples/efficiency/pytorch_lightning_fsdp.py b/neps_examples/efficiency/pytorch_lightning_fsdp.py index bc3c22df6..f4b42d58d 100644 --- a/neps_examples/efficiency/pytorch_lightning_fsdp.py +++ b/neps_examples/efficiency/pytorch_lightning_fsdp.py @@ -56,13 
+56,13 @@ def evaluate_pipeline(lr=0.1, epoch=20): logging.basicConfig(level=logging.INFO) - class PipelineSpace(neps.Pipeline): + class HPOSpace(neps.PipelineSpace): lr = neps.Float(min_value=0.001, max_value=0.1, log=True, prior=0.01) epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=3)) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), root_directory="results/pytorch_lightning_fsdp", max_evaluations_total=5, ) diff --git a/neps_examples/efficiency/pytorch_native_ddp.py b/neps_examples/efficiency/pytorch_native_ddp.py index fab4592e0..dfc42911a 100644 --- a/neps_examples/efficiency/pytorch_native_ddp.py +++ b/neps_examples/efficiency/pytorch_native_ddp.py @@ -106,7 +106,7 @@ def evaluate_pipeline(learning_rate, epochs): return {"loss": loss} -class PipelineSpace(neps.Pipeline): +class HPOSpace(neps.PipelineSpace): learning_rate = neps.Float(min_value=10e-7, max_value=10e-3, log=True) epochs = neps.Integer(min_value=1, max_value=3) @@ -115,7 +115,7 @@ class PipelineSpace(neps.Pipeline): logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), root_directory="results/pytorch_ddp", max_evaluations_total=25, ) diff --git a/neps_examples/efficiency/pytorch_native_fsdp.py b/neps_examples/efficiency/pytorch_native_fsdp.py index 531147908..c3cb4f9c8 100644 --- a/neps_examples/efficiency/pytorch_native_fsdp.py +++ b/neps_examples/efficiency/pytorch_native_fsdp.py @@ -208,13 +208,13 @@ def evaluate_pipeline(lr=0.1, epoch=20): logging.basicConfig(level=logging.INFO) - class PipelineSpace(neps.Pipeline): + class HPOSpace(neps.PipelineSpace): lr = neps.Float(min_value=0.0001, max_value=0.1, log=True, prior=0.01) epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=3)) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=PipelineSpace(), + pipeline_space=HPOSpace(), 
root_directory="results/pytorch_fsdp", max_evaluations_total=20, ) diff --git a/neps_examples/efficiency/warmstarting.py b/neps_examples/efficiency/warmstarting.py index 1f3b21ef5..04f584816 100644 --- a/neps_examples/efficiency/warmstarting.py +++ b/neps_examples/efficiency/warmstarting.py @@ -1,12 +1,22 @@ import neps import logging -from neps import Pipeline, Integer, Float, Fidelity +from neps import PipelineSpace, Integer, Float, Fidelity, Operation, Categorical from neps.space.neps_spaces import neps_space -class SimpleSpace(Pipeline): + +def operation(x): + """A simple operation that can be used in the pipeline.""" + return x + + +class SimpleSpace(PipelineSpace): int_param = Integer(0, 10) float_param = Float(0.0, 1.0) epochs = Fidelity(Integer(1, 5)) + # cat_param = Categorical((float_param, int_param)) + # op = Operation(operation, args=(float_param, int_param)) + # op2 = Operation("Test") + # op3 = Operation("Test2", args=(int_param,)) # Sampling a random configuration of the pipeline, which will be used for warmstarting @@ -14,25 +24,73 @@ class SimpleSpace(Pipeline): resolved_pipeline, resolution_context = neps_space.resolve( pipeline, environment_values={"epochs": 5} ) +# for operator in (resolved_pipeline.op, resolved_pipeline.op2, resolved_pipeline.op3): +# print("Resolved Pipeline:", operator) +# if callable(operator): +# print("Callable:", neps_space.convert_operation_to_callable(operator).__str__()) +# print(neps_space.convert_operation_to_string(resolved_pipeline.op)) +# print( +# neps_space.config_string.ConfigString( +# neps_space.convert_operation_to_string(operator) +# ).pretty_format() +# ) +# print( +# neps_space.config_string.ConfigString( +# neps_space.convert_operation_to_string(operator) +# ) +# ) +# print( +# neps_space.config_string.ConfigString( +# neps_space.convert_operation_to_string(operator) +# ).unwrapped +# ) -def evaluate_pipeline(int_param, float_param, epochs=5) -> dict[str, float]: - return {"objective_to_minimize": 
(int_param + float_param) * epochs, "cost": epochs} +def evaluate_pipeline(int_param, float_param, epochs=5, **kwargs) -> dict[str, float]: + return {"objective_to_minimize": -(int_param + float_param) * epochs, "cost": epochs} wanted_config = resolution_context.samplings_made wanted_env = resolution_context.environment_values wanted_result = evaluate_pipeline(**resolved_pipeline.get_attrs()) -warmstarting_configs = [(wanted_config, wanted_env, wanted_result)] +warmstarting_configs = [ + (wanted_config, wanted_env, wanted_result), + # (wanted_config, {"epochs": 2}, wanted_result), +] +from functools import partial # Running the NEPS pipeline with warmstarting logging.basicConfig(level=logging.INFO) +neps.warmstart_neps( + pipeline, + "results/warmstart_example/", + warmstarting_configs, + overwrite_working_directory=True, + optimizer=partial( + neps.algorithms.neps_random_search, + use_priors=True, + ignore_fidelity="highest fidelity", + ), +) neps.run( evaluate_pipeline=evaluate_pipeline, pipeline_space=SimpleSpace(), root_directory="results/warmstart_example/", - max_evaluations_total=15, - optimizer=neps.algorithms.neps_priorband, - warmstart_configs=warmstarting_configs, + max_evaluations_per_run=5, + optimizer=partial( + neps.algorithms.neps_random_search, + use_priors=True, + ignore_fidelity="highest fidelity", + ), + # warmstart_configs=warmstarting_configs, + overwrite_working_directory=False, +) +neps.status( + "results/warmstart_example", + print_summary=True, + pipeline_space_variables=( + SimpleSpace(), + ["int_param", "float_param", "epochs"], # , "op", "op2", "op3", "cat_param"], + ), ) diff --git a/neps_examples/experimental/freeze_thaw.py b/neps_examples/experimental/freeze_thaw.py index cd1fc9141..a2580504c 100644 --- a/neps_examples/experimental/freeze_thaw.py +++ b/neps_examples/experimental/freeze_thaw.py @@ -149,7 +149,7 @@ def training_pipeline( if __name__ == "__main__": logging.basicConfig(level=logging.INFO) - class 
PipelineSpace(neps.Pipeline): + class ModelSpace(neps.PipelineSpace): learning_rate = neps.Float(1e-5, 1e-1, log=True) num_layers = neps.Integer(1, 5) num_neurons = neps.Integer(64, 128) @@ -157,7 +157,7 @@ class PipelineSpace(neps.Pipeline): epochs = neps.Fidelity(neps.Integer(1, 10)) neps.run( - pipeline_space=PipelineSpace(), + pipeline_space=ModelSpace(), evaluate_pipeline=training_pipeline, optimizer="ifbo", max_evaluations_total=50, diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index 95cb10227..a6cc71f14 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -15,7 +15,7 @@ Float, Integer, Operation, - Pipeline, + PipelineSpace, Resampled, ) @@ -39,7 +39,7 @@ def hyperparameter_pipeline_to_optimize( return objective_to_minimize -class DemoHyperparameterSpace(Pipeline): +class DemoHyperparameterSpace(PipelineSpace): float1 = Float( min_value=0, max_value=1, @@ -71,7 +71,7 @@ class DemoHyperparameterSpace(Pipeline): ) -class DemoHyperparameterWithFidelitySpace(Pipeline): +class DemoHyperparameterWithFidelitySpace(PipelineSpace): float1 = Float( min_value=0, max_value=1, @@ -103,7 +103,7 @@ class DemoHyperparameterWithFidelitySpace(Pipeline): ) -class DemoHyperparameterComplexSpace(Pipeline): +class DemoHyperparameterComplexSpace(PipelineSpace): _small_float = Float( min_value=0, max_value=1, @@ -278,7 +278,7 @@ def operation_pipeline_to_optimize(model: Model, some_hp: str): return objective_to_minimize -class DemoOperationSpace(Pipeline): +class DemoOperationSpace(PipelineSpace): """A demonstration of how to use operations in a search space. This space defines a model that can be optimized using different inner functions and a factor. The model can be used to evaluate a set of values and return an objective to minimize. 
diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index 9292be0d7..c3be188b6 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -12,7 +12,7 @@ Fidelity, Float, Integer, - Pipeline, + PipelineSpace, ) _COSTS = {} @@ -33,7 +33,7 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): } -class DemoHyperparameterWithFidelitySpace(Pipeline): +class DemoHyperparameterWithFidelitySpace(PipelineSpace): float1 = Float( min_value=1, max_value=1000, diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index 40ea2aba6..985c4a3ad 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -12,7 +12,7 @@ Fidelity, Float, Integer, - Pipeline, + PipelineSpace, ) @@ -20,7 +20,7 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): return -float(np.sum([float1, float2, integer1])) * fidelity -class DemoHyperparameterWithFidelitySpace(Pipeline): +class DemoHyperparameterWithFidelitySpace(PipelineSpace): float1 = Float( min_value=1, max_value=1000, diff --git a/tests/test_neps_space/test_search_space__fidelity.py b/tests/test_neps_space/test_search_space__fidelity.py index 903253f9a..820e54191 100644 --- a/tests/test_neps_space/test_search_space__fidelity.py +++ b/tests/test_neps_space/test_search_space__fidelity.py @@ -11,11 +11,11 @@ Fidelity, Float, Integer, - Pipeline, + PipelineSpace, ) -class DemoHyperparametersWithFidelitySpace(Pipeline): +class DemoHyperparametersWithFidelitySpace(PipelineSpace): constant1: int = 42 float1 = Float( min_value=0, diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py 
index c846659fd..4c79f7d73 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -4,10 +4,15 @@ import neps.space.neps_spaces.sampling from neps.space.neps_spaces import config_string, neps_space -from neps.space.neps_spaces.parameters import Categorical, Operation, Pipeline, Resampled +from neps.space.neps_spaces.parameters import ( + Categorical, + Operation, + PipelineSpace, + Resampled, +) -class GrammarLike(Pipeline): +class GrammarLike(PipelineSpace): _id = Operation(operator="Identity") _three = Operation(operator="Conv2D-3") _one = Operation(operator="Conv2D-1") @@ -99,7 +104,7 @@ class GrammarLike(Pipeline): ) -class GrammarLikeAlt(Pipeline): +class GrammarLikeAlt(PipelineSpace): _id = Operation(operator="Identity") _three = Operation(operator="Conv2D-3") _one = Operation(operator="Conv2D-1") diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index 87da79178..d50f338cd 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -8,12 +8,12 @@ Categorical, Float, Operation, - Pipeline, + PipelineSpace, Resampled, ) -class HNASLikePipeline(Pipeline): +class HNASLikePipeline(PipelineSpace): """Based on the `hierarchical+shared` variant (cell block is shared everywhere). Across _CONVBLOCK items, _ACT and _CONV also shared. Only the _NORM changes. 
diff --git a/tests/test_neps_space/test_search_space__nos_like.py b/tests/test_neps_space/test_search_space__nos_like.py index 7574c89f7..ca882bb2a 100644 --- a/tests/test_neps_space/test_search_space__nos_like.py +++ b/tests/test_neps_space/test_search_space__nos_like.py @@ -7,12 +7,12 @@ Categorical, Integer, Operation, - Pipeline, + PipelineSpace, Resampled, ) -class NosBench(Pipeline): +class NosBench(PipelineSpace): _UNARY_FUN = Categorical( choices=( Operation(operator="Square"), diff --git a/tests/test_neps_space/test_search_space__recursion.py b/tests/test_neps_space/test_search_space__recursion.py index 70242ed8b..f2ca9d60d 100644 --- a/tests/test_neps_space/test_search_space__recursion.py +++ b/tests/test_neps_space/test_search_space__recursion.py @@ -7,7 +7,7 @@ Categorical, Float, Operation, - Pipeline, + PipelineSpace, Resampled, ) @@ -37,7 +37,7 @@ def __call__(self, values: Sequence[float]) -> float: return sum(values) -class DemoRecursiveOperationSpace(Pipeline): +class DemoRecursiveOperationSpace(PipelineSpace): # The way to sample `factor` values _factor = Float(min_value=0, max_value=1) diff --git a/tests/test_neps_space/test_search_space__resampled.py b/tests/test_neps_space/test_search_space__resampled.py index a11d34286..ee16760a2 100644 --- a/tests/test_neps_space/test_search_space__resampled.py +++ b/tests/test_neps_space/test_search_space__resampled.py @@ -9,12 +9,12 @@ Float, Integer, Operation, - Pipeline, + PipelineSpace, Resampled, ) -class ActPipelineSimpleFloat(Pipeline): +class ActPipelineSimpleFloat(PipelineSpace): prelu_init_value = Float( min_value=0, max_value=1000000, @@ -52,7 +52,7 @@ class ActPipelineSimpleFloat(Pipeline): ) -class ActPipelineComplexInteger(Pipeline): +class ActPipelineComplexInteger(PipelineSpace): prelu_init_value = Integer(min_value=0, max_value=1000000) prelu_shared1 = Operation( @@ -102,7 +102,7 @@ class ActPipelineComplexInteger(Pipeline): ) -class CellPipelineCategorical(Pipeline): +class 
CellPipelineCategorical(PipelineSpace): conv_block = Categorical( choices=( Operation(operator="conv1"), diff --git a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index b8b02c771..078f719d1 100644 --- a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -10,11 +10,11 @@ Float, Integer, Operation, - Pipeline, + PipelineSpace, ) -class ActPipelineSimple(Pipeline): +class ActPipelineSimple(PipelineSpace): prelu = Operation( operator="prelu", kwargs={"init": 0.1}, @@ -26,7 +26,7 @@ class ActPipelineSimple(Pipeline): ) -class ActPipelineComplex(Pipeline): +class ActPipelineComplex(PipelineSpace): prelu_init_value: float = Float(min_value=0.1, max_value=0.9) prelu = Operation( operator="prelu", @@ -37,7 +37,7 @@ class ActPipelineComplex(Pipeline): ) -class FixedPipeline(Pipeline): +class FixedPipeline(PipelineSpace): prelu_init_value: float = 0.5 prelu = Operation( operator="prelu", @@ -55,7 +55,7 @@ class FixedPipeline(Pipeline): ) -class ConvPipeline(Pipeline): +class ConvPipeline(PipelineSpace): conv_choices_prior_index: int = Integer( min_value=0, max_value=1, @@ -91,7 +91,7 @@ class ConvPipeline(Pipeline): ) -class CellPipeline(Pipeline): +class CellPipeline(PipelineSpace): _act = Operation(operator="relu") _conv = Operation(operator="conv3x3") _norm = Operation(operator="batch") diff --git a/tests/test_neps_space/utils.py b/tests/test_neps_space/utils.py index 837379925..9529c3c40 100644 --- a/tests/test_neps_space/utils.py +++ b/tests/test_neps_space/utils.py @@ -7,9 +7,9 @@ def generate_possible_config_strings( - pipeline: neps.space.neps_spaces.parameters.Pipeline, + pipeline: neps.space.neps_spaces.parameters.PipelineSpace, resolved_pipeline_attr_getter: Callable[ - [neps.space.neps_spaces.parameters.Pipeline], + [neps.space.neps_spaces.parameters.PipelineSpace], 
neps.space.neps_spaces.parameters.Operation, ], num_resolutions: int = 50_000, diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index fd64c43be..cdd3800f6 100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -26,14 +26,14 @@ Fidelity, Float, Integer, - Pipeline, + PipelineSpace, ) from neps.state import BudgetInfo, NePSState, OptimizationState, SeedSnapshot @case -def case_search_space_no_fid() -> Pipeline: - class Space(Pipeline): +def case_search_space_no_fid() -> PipelineSpace: + class Space(PipelineSpace): a = Float(0, 1) b = Categorical(("a", "b", "c")) c = "a" @@ -43,8 +43,8 @@ class Space(Pipeline): @case -def case_search_space_with_fid() -> Pipeline: - class SpaceFid(Pipeline): +def case_search_space_with_fid() -> PipelineSpace: + class SpaceFid(PipelineSpace): a = Float(0, 1) b = Categorical(("a", "b", "c")) c = "a" @@ -55,8 +55,8 @@ class SpaceFid(Pipeline): @case -def case_search_space_no_fid_with_prior() -> Pipeline: - class SpacePrior(Pipeline): +def case_search_space_no_fid_with_prior() -> PipelineSpace: + class SpacePrior(PipelineSpace): a = Float(0, 1, prior=0.5, prior_confidence="medium") b = Categorical(("a", "b", "c"), prior=0, prior_confidence="medium") c = "a" @@ -66,8 +66,8 @@ class SpacePrior(Pipeline): @case -def case_search_space_fid_with_prior() -> Pipeline: - class SpaceFidPrior(Pipeline): +def case_search_space_fid_with_prior() -> PipelineSpace: + class SpaceFidPrior(PipelineSpace): a = Float(0, 1, prior=0.5, prior_confidence="medium") b = Categorical(("a", "b", "c"), prior=0, prior_confidence="medium") c = "a" @@ -136,8 +136,8 @@ class SpaceFidPrior(Pipeline): @parametrize("key", list(PredefinedOptimizers.keys())) @parametrize_with_cases("search_space", cases=".", prefix="case_search_space") def optimizer_and_key_and_search_space( - key: str, search_space: Pipeline -) -> tuple[AskFunction, str, Pipeline | SearchSpace]: + key: str, search_space: PipelineSpace 
+) -> tuple[AskFunction, str, PipelineSpace | SearchSpace]: if key in JUST_SKIP: pytest.xfail(f"{key} is not instantiable") @@ -169,9 +169,11 @@ def optimizer_and_key_and_search_space( return ( opt, key, - converted_space - if converted_space and key not in REQUIRES_NEPS_SPACE - else search_space, + ( + converted_space + if converted_space and key not in REQUIRES_NEPS_SPACE + else search_space + ), ) @@ -199,7 +201,9 @@ def case_neps_state_filebased( @parametrize_with_cases("neps_state", cases=".", prefix="case_neps_state") def test_sample_trial( neps_state: NePSState, - optimizer_and_key_and_search_space: tuple[AskFunction, str, Pipeline | SearchSpace], + optimizer_and_key_and_search_space: tuple[ + AskFunction, str, PipelineSpace | SearchSpace + ], capsys, ) -> None: optimizer, key, search_space = optimizer_and_key_and_search_space @@ -273,7 +277,9 @@ def test_sample_trial( def test_optimizers_work_roughly( - optimizer_and_key_and_search_space: tuple[AskFunction, str, Pipeline | SearchSpace], + optimizer_and_key_and_search_space: tuple[ + AskFunction, str, PipelineSpace | SearchSpace + ], ) -> None: opt, key, search_space = optimizer_and_key_and_search_space ask_and_tell = AskAndTell(opt) From 0db699f1a4b2cc1c2a3712940c37b6bf69540ef0 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Wed, 30 Jul 2025 18:32:17 +0200 Subject: [PATCH 046/156] Add resolving for tuples and lists --- neps/space/neps_spaces/neps_space.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index b91120271..d8f7f100d 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -609,6 +609,32 @@ def _( context.add_resolved(fidelity_obj, result) return result + @_resolver_dispatch.register + def _( + self, + resolvable_obj: tuple, + context: SamplingResolutionContext, # noqa: ARG002 + ) -> Any: + return 
tuple(self._resolve_collection(resolvable_obj, context)) + + @_resolver_dispatch.register + def _( + self, + resolvable_obj: list, + context: SamplingResolutionContext, # noqa: ARG002 + ) -> Any: + return self._resolve_collection(resolvable_obj, context) + + def _resolve_collection( + self, + resolvable_obj: tuple | list, + context: SamplingResolutionContext, # noqa: ARG002 + ) -> list[Any]: + result = [] + for idx, item in enumerate(resolvable_obj): + result.append(self._resolve(item, f"collection[{idx}]", context)) + return result + @_resolver_dispatch.register def _( self, From 07b923cc2c6d52343ba98b6fe4cc9fbcf8b921a6 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Wed, 30 Jul 2025 18:53:45 +0200 Subject: [PATCH 047/156] Fix test --- .../test_search_space__grammar_like.py | 157 +++++++++++++++--- 1 file changed, 133 insertions(+), 24 deletions(-) diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index 4c79f7d73..8c683443f 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -327,63 +327,172 @@ def test_resolve_context(): def test_resolve_context_alt(): samplings_to_make = { "Resolvable.S.args.resampled_categorical::categorical__6": 3, - "Resolvable.S.args[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 2 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 2 + ), + 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 5 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 0 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__4": ( + 2 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( 1 ), - "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[2].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__6": ( - 3 + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[3].resampled_categorical::categorical__3": ( + 0 ), - 
"Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[4].resampled_categorical::categorical__3": ( + 0 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[5].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[6].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args[0].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[1].resampled_operation.args[0].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[7].resampled_categorical::categorical__3": ( + 2 + ), + 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__4": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args[1].resampled_operation.args.resampled_categorical::categorical__4": ( - 3 + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( + 5 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 2 ), - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 5 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( 3 ), - 
"Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__4": ( 0 ), - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__4": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_categorical::categorical__3": ( + 1 + ), + 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( + 2 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[2].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[3].resampled_categorical::categorical__3": ( + 1 + ), + 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[4].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[5].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[6].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[7].resampled_categorical::categorical__3": ( 0 ), - 
"Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args.resampled_categorical::categorical__4": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( + 5 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( 3 ), - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args.resampled_categorical::categorical__6": ( - 4 + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 0 + ), + 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__4": ( + 0 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__4": ( + 0 + ), + 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_categorical::categorical__3": ( + 0 ), - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[0].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[2].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[3].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.args[1].resampled_operation.args[0].resampled_operation.args[1].resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[4].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[5].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[6].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[7].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[2].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[3].resampled_categorical::categorical__3": ( + 2 + ), + 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[4].resampled_categorical::categorical__3": ( + 2 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[5].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[6].resampled_categorical::categorical__3": ( + 1 + ), + "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[7].resampled_categorical::categorical__3": ( 0 ), } expected_s_config_string = ( - "(Sequential (Sequential (Sequential (Identity) (Sequential (Sequential" - " (ReLUConvBN)) (Sequential (Conv2D-3))) (ReLUConvBN))) (Sequential (Sequential" - " (Sequential (Sequential (Conv2D-3))) (Sequential (Sequential (Conv2D-1)" - " (Identity) (Conv2D-3))))))" + "(Sequential (Sequential (Sequential (Sequential (Sequential " + "(Sequential (Conv2D-3) (Sequential (ReLUConvBN)))) (Sequential " + "(ReLUConvBN)) (Identity) (Conv2D-3) (Conv2D-3) (Conv2D-1) (Conv2D-3) " + "(Identity)))) (Sequential (Conv2D-3) (Sequential (Sequential " + "(Sequential (Sequential (Sequential (ReLUConvBN)) (Sequential " + "(Conv2D-1))) (Sequential (Sequential (ReLUConvBN))) (Conv2D-1) " + "(Conv2D-1) (Identity) (Conv2D-1) (Conv2D-3) (Conv2D-3))) " + "(Sequential (Sequential (ReLUConvBN)) (Sequential (Sequential " + "(Sequential (Identity))) (Sequential (Conv2D-3))) (Conv2D-1) " + "(Identity) 
(Conv2D-1) (Conv2D-1) (Conv2D-1) (Identity)) (Identity) " + "(Identity) (Identity) (Conv2D-1) (Conv2D-1) (Conv2D-3)) (ReLUConvBN)))" ) pipeline = GrammarLikeAlt() From 4f8d778a9f508b0f22f2989f00aea5f8f9c6f7d8 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Wed, 30 Jul 2025 19:06:50 +0200 Subject: [PATCH 048/156] Change neps_spaces tests to use a /tmp/ directory --- tests/test_neps_space/test_neps_integration.py | 8 ++++---- .../test_neps_integration_priorband__max_cost.py | 4 ++-- .../test_neps_integration_priorband__max_evals.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index a6cc71f14..1088cd029 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -162,7 +162,7 @@ class DemoHyperparameterComplexSpace(PipelineSpace): ) def test_hyperparameter_demo(optimizer): pipeline_space = DemoHyperparameterSpace() - root_directory = f"results/hyperparameter_demo__{optimizer.func.__name__}" + root_directory = f"/tmp/test_neps_spaces/results/hyperparameter_demo__{optimizer.func.__name__}" neps.run( evaluate_pipeline=hyperparameter_pipeline_to_optimize, @@ -186,7 +186,7 @@ def test_hyperparameter_demo(optimizer): def test_hyperparameter_with_fidelity_demo(optimizer): pipeline_space = DemoHyperparameterWithFidelitySpace() root_directory = ( - f"results/hyperparameter_with_fidelity_demo__{optimizer.func.__name__}" + f"/tmp/test_neps_spaces/results/hyperparameter_with_fidelity_demo__{optimizer.func.__name__}" ) neps.run( @@ -210,7 +210,7 @@ def test_hyperparameter_with_fidelity_demo(optimizer): ) def test_hyperparameter_complex_demo(optimizer): pipeline_space = DemoHyperparameterComplexSpace() - root_directory = f"results/hyperparameter_complex_demo__{optimizer.func.__name__}" + root_directory = 
f"/tmp/test_neps_spaces/results/hyperparameter_complex_demo__{optimizer.func.__name__}" neps.run( evaluate_pipeline=hyperparameter_pipeline_to_optimize, @@ -335,7 +335,7 @@ class DemoOperationSpace(PipelineSpace): ) def test_operation_demo(optimizer): pipeline_space = DemoOperationSpace() - root_directory = f"results/operation_demo__{optimizer.__name__}" + root_directory = f"/tmp/test_neps_spaces/results/operation_demo__{optimizer.__name__}" neps.run( evaluate_pipeline=operation_pipeline_to_optimize, diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index c3be188b6..82319a5bd 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -93,7 +93,7 @@ class DemoHyperparameterWithFidelitySpace(PipelineSpace): def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): optimizer.__name__ = optimizer_name # Needed by NEPS later. pipeline_space = DemoHyperparameterWithFidelitySpace() - root_directory = f"results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" + root_directory = f"/tmp/test_neps_spaces/results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" # Reset the _COSTS global, so they do not get mixed up between tests. _COSTS.clear() @@ -134,7 +134,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): optimizer.__name__ = optimizer_name # Needed by NEPS later. pipeline_space = DemoHyperparameterWithFidelitySpace() - root_directory = f"results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" + root_directory = f"/tmp/test_neps_spaces/results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" # Reset the _COSTS global, so they do not get mixed up between tests. 
_COSTS.clear() diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index 985c4a3ad..f3b542f04 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -80,7 +80,7 @@ class DemoHyperparameterWithFidelitySpace(PipelineSpace): def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): optimizer.__name__ = optimizer_name # Needed by NEPS later. pipeline_space = DemoHyperparameterWithFidelitySpace() - root_directory = f"results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" + root_directory = f"/tmp/test_neps_spaces/results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" neps.run( evaluate_pipeline=evaluate_pipeline, @@ -118,7 +118,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): optimizer.__name__ = optimizer_name # Needed by NEPS later. 
pipeline_space = DemoHyperparameterWithFidelitySpace() - root_directory = f"results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" + root_directory = f"/tmp/test_neps_spaces/results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" neps.run( evaluate_pipeline=evaluate_pipeline, From dbb596468a0a3f92a122db2d8c88a2ddc279fc99 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Wed, 30 Jul 2025 19:27:48 +0200 Subject: [PATCH 049/156] Add TODO note to fix new issue with eager tuple/list resolving --- neps/space/neps_spaces/neps_space.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index d8f7f100d..8d647d5d7 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -440,6 +440,9 @@ def _( # If we add a `_resolve_tuple` functionality to go into tuples # and resolve their contents, the call below will likely # lead to too much work being done or issues. 
+ # TODO: since we added resolving for tuples/lists, it can be + # that the assumptions from the above comment do not apply + # and the behavior is now broken resolved_attr_value = self._resolve( initial_attr_value, attr_name, context ) From 5beebfa416d4dc71965d5f0a7c81ff2e1b349b10 Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 30 Jul 2025 20:32:18 +0200 Subject: [PATCH 050/156] Enhance operation conversion to handle tuples and lists in arguments and kwargs --- neps/space/neps_spaces/neps_space.py | 50 ++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 8d647d5d7..3f9ad5b09 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -616,7 +616,7 @@ def _( def _( self, resolvable_obj: tuple, - context: SamplingResolutionContext, # noqa: ARG002 + context: SamplingResolutionContext, ) -> Any: return tuple(self._resolve_collection(resolvable_obj, context)) @@ -624,14 +624,14 @@ def _( def _( self, resolvable_obj: list, - context: SamplingResolutionContext, # noqa: ARG002 + context: SamplingResolutionContext, ) -> Any: return self._resolve_collection(resolvable_obj, context) def _resolve_collection( self, resolvable_obj: tuple | list, - context: SamplingResolutionContext, # noqa: ARG002 + context: SamplingResolutionContext, ) -> list[Any]: result = [] for idx, item in enumerate(resolvable_obj): @@ -707,19 +707,43 @@ def convert_operation_to_callable(operation: Operation) -> Callable: """ operator = cast(Callable, operation.operator) - operation_args = [] + operation_args: list[Any] = [] for arg in operation.args: - operation_args.append( - convert_operation_to_callable(arg) if isinstance(arg, Operation) else arg - ) + if isinstance(arg, tuple | list): + arg_sequence: list[Any] = [] + for a in arg: + converted_arg = ( + convert_operation_to_callable(a) if isinstance(a, Operation) else a + ) + 
arg_sequence.append(converted_arg) + if isinstance(arg, tuple): + operation_args.append(tuple(arg_sequence)) + else: + operation_args.append(arg_sequence) + else: + operation_args.append( + convert_operation_to_callable(arg) if isinstance(arg, Operation) else arg + ) - operation_kwargs = {} + operation_kwargs: dict[str, Any] = {} for kwarg_name, kwarg_value in operation.kwargs.items(): - operation_kwargs[kwarg_name] = ( - convert_operation_to_callable(kwarg_value) - if isinstance(kwarg_value, Operation) - else kwarg_value - ) + if isinstance(kwarg_value, tuple | list): + kwarg_sequence: list[Any] = [] + for a in kwarg_value: + converted_kwarg = ( + convert_operation_to_callable(a) if isinstance(a, Operation) else a + ) + kwarg_sequence.append(converted_kwarg) + if isinstance(kwarg_value, tuple): + operation_kwargs[kwarg_name] = tuple(kwarg_sequence) + else: + operation_kwargs[kwarg_name] = kwarg_sequence + else: + operation_kwargs[kwarg_name] = ( + convert_operation_to_callable(kwarg_value) + if isinstance(kwarg_value, Operation) + else kwarg_value + ) return cast(Callable, operator(*operation_args, **operation_kwargs)) From 40dadc4108ede2bf5a27b606ea377954ac0f3a9c Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Wed, 30 Jul 2025 20:57:37 +0200 Subject: [PATCH 051/156] Add `Lazy` component and use it to stop categoricals from eagerly resolving provided choices --- neps/space/neps_spaces/neps_space.py | 85 +++++++++++++++++++++++----- neps/space/neps_spaces/parameters.py | 56 ++++++++++++++++++ 2 files changed, 127 insertions(+), 14 deletions(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 3f9ad5b09..4fa5b0e88 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -24,6 +24,7 @@ PipelineSpace, Resampled, Resolvable, + _Lazy, ) from neps.space.neps_spaces.sampling import ( DomainSampler, @@ -430,23 +431,66 @@ def _( for attr_name, 
initial_attr_value in initial_attrs.items(): if attr_name == "choices": - if isinstance(initial_attr_value, Resolvable): - # Resolving here like below works fine since the expectation - # is that we will get back a tuple of choices. - # Any element in that tuple can be a Resolvable, - # but will not be resolved from the call directly below, - # as the tuple is returned as is, - # without going into resolving its elements. - # If we add a `_resolve_tuple` functionality to go into tuples - # and resolve their contents, the call below will likely - # lead to too much work being done or issues. - # TODO: since we added resolving for tuples/lists, it can be - # that the assumptions from the above comment do not apply - # and the behavior is now broken + # We need special handling if we are dealing with a "choice provider", + # which will select a tuple of choices from its own choices, + # from which then this original categorical will pick. + + # Ideally, from the choices provided, we want to first pick one, + # and then only resolve that picked item. + # We don't want the resolution process to directly go inside + # the tuple of provided choices that gets picked from the provider, + # since that would lead to potentially exponential growth + # and in resolving stuff what will ultimately be useless to us. + + # For this reason, if we haven't already sampled this categorical + # (the choice provider), we make sure to wrap each of the choices + # inside it the provider in a `Lazy` resolvable. + # This ensures that the resolving process stops directly after + # the provider has made its choice. + + # Since we may be manually creating new objects that get resolved, + # it's important that we manually add to the context + # the original objects, because they can be possibly reused elsewhere. 
+ + if ( + isinstance(initial_attr_value, Categorical) + and context.was_already_resolved(initial_attr_value) + ): + # Before making adjustments, we make sure we haven't + # already chosen a value for the provider. + # Otherwise, we already have the final answer for it. + resolved_attr_value = context.get_resolved(initial_attr_value) + elif ( + isinstance(initial_attr_value, Categorical) or + ( + isinstance(initial_attr_value, Resampled) + and isinstance(initial_attr_value.source, Categorical) + ) + ): + # We have a previously unseen provider. Adjust its internals. + choice_provider_final_attrs = {**initial_attr_value.get_attrs()} + choice_provider_choices = choice_provider_final_attrs["choices"] + if isinstance(choice_provider_choices, (tuple, list)): + choice_provider_choices = tuple( + _Lazy(content=choice) for choice in choice_provider_choices + ) + choice_provider_final_attrs["choices"] = choice_provider_choices + choice_provider_adjusted = initial_attr_value.from_attrs( + choice_provider_final_attrs + ) + resolved_attr_value = self._resolve( - initial_attr_value, attr_name, context + choice_provider_adjusted, "choice_provider", context ) + + if not isinstance(initial_attr_value, Resampled): + # It's important that we manually add this here, + # as we manually created a different object from the original. + # In case the original categorical is used again, + # it will need to be reused with the final value we got here. + context.add_resolved(initial_attr_value, resolved_attr_value) else: + # We have "choices" which are ready to use. resolved_attr_value = initial_attr_value else: resolved_attr_value = self._resolve( @@ -612,6 +656,19 @@ def _( context.add_resolved(fidelity_obj, result) return result + @_resolver_dispatch.register + def _( + self, + resolvable_obj: _Lazy, + context: SamplingResolutionContext, # noqa: ARG002 + ) -> Any: + # When resolving a lazy resolvable, + # just directly return the content it's holding. 
+ # The purpose of the lazy resolvable is to stop + # the resolver from going deeper into the process. + # In this case, to stop the resolution of `resolvable_obj.content`. + return resolvable_obj.content + @_resolver_dispatch.register def _( self, diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 974d2ea08..138181f3f 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -1126,4 +1126,60 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: raise ValueError( f"Source should be a resolvable object. Is: {self._source!r}." ) + + # It's okay that we return this directly, since it will be a new object. return self._source.from_attrs(attrs) + + +class _Lazy(Resolvable): + """A class representing a lazy operation in a NePS space. + + The purpose is to have the resolution process + stop at the moment it gets to this object, + preventing the resolution of the object it wraps. + + Attributes: + content: The content held, which can be a resolvable object or a + tuple or a string. + """ + + def __init__(self, content: Resolvable | tuple[Any] | str): + self._content = content + + @property + def content(self) -> Resolvable | tuple[Any] | str: + """Get the content being held. + + Returns: + The content of the lazy resolvable, which can be a resolvable object + or a tuple or a string. + """ + return self._content + + def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the lazy resolvable as a mapping. + + Raises: + ValueError: Always, since this operation does not make sense here. + """ + raise ValueError( + f"This is a lazy resolvable. Can't get attrs from it: {self.content!r}." + ) + + def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: + """Create a new resolvable object from the given attributes. + + Args: + attrs: A mapping of attribute names to their values. + + Returns: + A new resolvable object created from the specified attributes. 
+ + Raises: + ValueError: Always, since this operation does not make sense here. + + + """ + raise ValueError( + f"This is a lazy resolvable. Can't create object for it: {self.content!r}." + ) From 0bd1449af39a633145a171876475d240aff6788a Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Thu, 31 Jul 2025 00:31:57 +0200 Subject: [PATCH 052/156] Add dict resolving and simplify operation resolving --- neps/space/neps_spaces/neps_space.py | 77 ++++++------- neps/space/neps_spaces/parameters.py | 2 +- .../test_search_space__grammar_like.py | 102 +++++++++--------- 3 files changed, 84 insertions(+), 97 deletions(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 4fa5b0e88..24f9d4abf 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -440,17 +440,19 @@ def _( # We don't want the resolution process to directly go inside # the tuple of provided choices that gets picked from the provider, # since that would lead to potentially exponential growth - # and in resolving stuff what will ultimately be useless to us. + # and in resolving stuff that will ultimately be useless to us. # For this reason, if we haven't already sampled this categorical # (the choice provider), we make sure to wrap each of the choices - # inside it the provider in a `Lazy` resolvable. + # inside it in a lazy resolvable. # This ensures that the resolving process stops directly after # the provider has made its choice. - # Since we may be manually creating new objects that get resolved, - # it's important that we manually add to the context - # the original objects, because they can be possibly reused elsewhere. + # Since we may be manually creating a new categorical object + # for the provider, which is what will then get resolved, + # it's important that we manually store + # in the context that resolved value for the original object. 
+ # The original object can possibly be reused elsewhere. if ( isinstance(initial_attr_value, Categorical) @@ -467,7 +469,10 @@ def _( and isinstance(initial_attr_value.source, Categorical) ) ): - # We have a previously unseen provider. Adjust its internals. + # We have a previously unseen provider. + # Create a new object where the choices are lazy, + # and then sample from it, manually tracking the context. + choice_provider_final_attrs = {**initial_attr_value.get_attrs()} choice_provider_choices = choice_provider_final_attrs["choices"] if isinstance(choice_provider_choices, (tuple, list)): @@ -482,12 +487,11 @@ def _( resolved_attr_value = self._resolve( choice_provider_adjusted, "choice_provider", context ) - if not isinstance(initial_attr_value, Resampled): - # It's important that we manually add this here, + # It's important that we handle filling the context here, # as we manually created a different object from the original. # In case the original categorical is used again, - # it will need to be reused with the final value we got here. + # it will need to be reused with the final value we resolved. context.add_resolved(initial_attr_value, resolved_attr_value) else: # We have "choices" which are ready to use. @@ -528,13 +532,6 @@ def _( if context.was_already_resolved(operation_obj): return context.get_resolved(operation_obj) - # It is possible that the `operation_obj` will require two runs to be fully - # resolved. For example if `operation_obj.args` is not defined as a tuple of - # args, but is a Resolvable that needs to be resolved first itself, - # for us to have the actual tuple of args. - - # First run. - initial_attrs = operation_obj.get_attrs() final_attrs = {} needed_resolving = False @@ -546,30 +543,9 @@ def _( initial_attr_value is not resolved_attr_value ) - operation_obj_first_run = operation_obj + result = operation_obj if needed_resolving: - operation_obj_first_run = operation_obj.from_attrs(final_attrs) - - # Second run. 
- # It is possible the first run was enough, - # in this case what we do below won't lead to any changes. - - initial_attrs = operation_obj_first_run.get_attrs() - final_attrs = {} - needed_resolving = False - - for attr_name, initial_attr_value in initial_attrs.items(): - resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) - final_attrs[attr_name] = resolved_attr_value - needed_resolving = needed_resolving or ( - initial_attr_value is not resolved_attr_value - ) - - operation_obj_second_run = operation_obj_first_run - if needed_resolving: - operation_obj_second_run = operation_obj_first_run.from_attrs(final_attrs) - - result = operation_obj_second_run + result = operation_obj.from_attrs(final_attrs) context.add_resolved(operation_obj, result) return result @@ -669,30 +645,41 @@ def _( # In this case, to stop the resolution of `resolvable_obj.content`. return resolvable_obj.content + @_resolver_dispatch.register + def _( + self, + resolvable_obj: dict, + context: SamplingResolutionContext, + ) -> dict[Any, Any]: + result = {} + for k, v in resolvable_obj.items(): + result[k] = self._resolve(v, f"mapping_value{{{k}}}", context) + return result + @_resolver_dispatch.register def _( self, resolvable_obj: tuple, context: SamplingResolutionContext, - ) -> Any: - return tuple(self._resolve_collection(resolvable_obj, context)) + ) -> tuple[Any]: + return tuple(self._resolve_sequence(resolvable_obj, context)) @_resolver_dispatch.register def _( self, resolvable_obj: list, context: SamplingResolutionContext, - ) -> Any: - return self._resolve_collection(resolvable_obj, context) + ) -> list[Any]: + return self._resolve_sequence(resolvable_obj, context) - def _resolve_collection( + def _resolve_sequence( self, resolvable_obj: tuple | list, context: SamplingResolutionContext, ) -> list[Any]: result = [] for idx, item in enumerate(resolvable_obj): - result.append(self._resolve(item, f"collection[{idx}]", context)) + result.append(self._resolve(item, 
f"sequence[{idx}]", context)) return result @_resolver_dispatch.register diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 138181f3f..3cb9e4872 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -399,7 +399,7 @@ class Categorical(Domain[int], Generic[T]): def __init__( self, - choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T], + choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T] | Resolvable, prior: int | Domain[int] | _Unset = _UNSET, prior_confidence: ( ConfidenceLevel | Literal["low", "medium", "high"] | _Unset diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index 8c683443f..362d4ea20 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -327,157 +327,157 @@ def test_resolve_context(): def test_resolve_context_alt(): samplings_to_make = { "Resolvable.S.args.resampled_categorical::categorical__6": 3, - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 2 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 2 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 5 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 0 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__4": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__4": ( 2 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical::categorical__6": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical::categorical__6": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[2].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[2].resampled_categorical::categorical__3": ( 2 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[3].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[3].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[4].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[4].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[5].resampled_categorical::categorical__3": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[5].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[6].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[6].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[7].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[7].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__4": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical::categorical__4": ( 1 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical::categorical__6": ( 5 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 2 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 5 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 3 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__4": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical::categorical__4": ( 0 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical::categorical__6": ( 2 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[2].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[2].resampled_categorical::categorical__3": ( 1 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[3].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[3].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[4].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[4].resampled_categorical::categorical__3": ( 2 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[5].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[5].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[6].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[6].resampled_categorical::categorical__3": ( 0 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[7].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[7].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical::categorical__6": ( 5 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical::categorical__6": ( 3 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__6": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__6": ( 0 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical::categorical__4": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical::categorical__4": ( 0 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_categorical::categorical__3": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical::categorical__4": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical::categorical__4": ( 0 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[0].resampled_categorical::categorical__3": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[0].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[2].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[2].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[3].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[3].resampled_categorical::categorical__3": ( 2 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[4].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[4].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[5].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[5].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[6].resampled_categorical::categorical__3": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[6].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[7].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[7].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[2].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[2].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[3].resampled_categorical::categorical__3": ( + 
"Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[3].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[4].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[4].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[5].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[5].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[6].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[6].resampled_categorical::categorical__3": ( 1 ), - 
"Resolvable.S.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[1].resampled_operation.args.resampled_categorical.sampled_value.collection[7].resampled_categorical::categorical__3": ( + "Resolvable.S.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[1].resampled_operation.args.resampled_categorical.sampled_value.sequence[7].resampled_categorical::categorical__3": ( 0 ), } From 0f015a3b4419232743119b515db1a7b8d0ec48f9 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Fri, 1 Aug 2025 22:32:29 +0200 Subject: [PATCH 053/156] Simplify resolving Operations and correct resolving for sequences and dicts --- neps/space/neps_spaces/neps_space.py | 57 +++++++++++++++--- neps/space/neps_spaces/parameters.py | 32 +--------- pyproject.toml | 2 +- .../test_search_space__grammar_like.py | 60 +++++++++---------- .../test_search_space__hnas_like.py | 38 ++++++------ .../test_search_space__resampled.py | 4 -- .../test_search_space__reuse_arch_elements.py | 31 +++++++--- 7 files changed, 125 insertions(+), 99 deletions(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 24f9d4abf..f37fe974b 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -573,6 +573,16 @@ def _( initial_attrs = resampled_obj.get_attrs() resolvable_to_resample_obj = resampled_obj.from_attrs(initial_attrs) + if resolvable_to_resample_obj is resampled_obj.source: + # The final resolvable we are resolving needs to be a different + # instance from the original wrapped object. + # Otherwise, it's possible we'll be taking its result + # from the context cache, instead of resampling it. 
+ raise ValueError( + "The final object must be a different instance from the original: " + f"{resolvable_to_resample_obj!r}" + ) + type_name = type(resolvable_to_resample_obj).__name__.lower() return self._resolve( resolvable_to_resample_obj, f"resampled_{type_name}", context @@ -651,9 +661,23 @@ def _( resolvable_obj: dict, context: SamplingResolutionContext, ) -> dict[Any, Any]: - result = {} - for k, v in resolvable_obj.items(): - result[k] = self._resolve(v, f"mapping_value{{{k}}}", context) + # The logic below is done so that if the original dict + # had only things that didn't need resolving, + # we return the original object. + # That is important for the rest of the resolving process. + original_dict = resolvable_obj + new_dict = {} + needed_resolving = False + + for k, initial_v in original_dict.items(): + resolved_v = self._resolve(initial_v, f"mapping_value{{{k}}}", context) + new_dict[k] = resolved_v + needed_resolving = needed_resolving or (resolved_v is not initial_v) + + result = original_dict + if needed_resolving: + result = new_dict + return result @_resolver_dispatch.register @@ -662,7 +686,7 @@ def _( resolvable_obj: tuple, context: SamplingResolutionContext, ) -> tuple[Any]: - return tuple(self._resolve_sequence(resolvable_obj, context)) + return self._resolve_sequence(resolvable_obj, context) @_resolver_dispatch.register def _( @@ -676,10 +700,27 @@ def _resolve_sequence( self, resolvable_obj: tuple | list, context: SamplingResolutionContext, - ) -> list[Any]: - result = [] - for idx, item in enumerate(resolvable_obj): - result.append(self._resolve(item, f"sequence[{idx}]", context)) + ) -> list[Any] | tuple[Any]: + # The logic below is done so that if the original sequence + # had only things that didn't need resolving, + # we return the original object. + # That is important for the rest of the resolving process. 
+ original_sequence = resolvable_obj + new_list = [] + needed_resolving = False + + for idx, item in enumerate(original_sequence): + resolved_item = self._resolve(item, f"sequence[{idx}]", context) + new_list.append(resolved_item) + needed_resolving = needed_resolving or (item is not resolved_item) + + result = original_sequence + if needed_resolving: + # We also want to return a result of the same type + # as the original received value. + original_type = type(original_sequence) + result = original_type(new_list) + return result @_resolver_dispatch.register diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 3cb9e4872..0ce258f7d 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -1003,20 +1003,7 @@ def get_attrs(self) -> Mapping[str, Any]: A mapping of attribute names to their values. """ - # TODO: [lum] simplify this. We know the fields. Maybe other places too. - result: dict[str, Any] = {} - for name, value in vars(self).items(): - stripped_name = name.lstrip("_") - if isinstance(value, dict): - for k, v in value.items(): - # Multiple {{}} needed to escape surrounding '{' and '}'. - result[f"{stripped_name}{{{k}}}"] = v - elif isinstance(value, tuple): - for i, v in enumerate(value): - result[f"{stripped_name}[{i}]"] = v - else: - result[stripped_name] = value - return result + return {k.lstrip("_"): v for k, v in vars(self).items()} def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: """Create a new Operation instance from the given attributes. @@ -1031,20 +1018,7 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: ValueError: If the attributes do not match the operation's expected structure. """ - # TODO: [lum] simplify this. We know the fields. Maybe other places too. 
- final_attrs: dict[str, Any] = {} - for name, value in attrs.items(): - if "{" in name and "}" in name: - base, key = name.split("{") - key = key.rstrip("}") - final_attrs.setdefault(base, {})[key] = value - elif "[" in name and "]" in name: - base, idx_str = name.split("[") - idx = int(idx_str.rstrip("]")) - final_attrs.setdefault(base, []).insert(idx, value) - else: - final_attrs[name] = value - return type(self)(**final_attrs) + return type(self)(**attrs) class Resampled(Resolvable): @@ -1126,8 +1100,6 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: raise ValueError( f"Source should be a resolvable object. Is: {self._source!r}." ) - - # It's okay that we return this directly, since it will be a new object. return self._source.from_attrs(attrs) diff --git a/pyproject.toml b/pyproject.toml index 78acfde3f..15383b4d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ classifiers = [ requires-python = ">=3.10,<3.14" dependencies = [ - "numpy>=2.0", + "numpy", "pandas>=2.0,<3.0", "networkx>=2.6.3,<3.0", "scipy>=1.13.1", diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index 362d4ea20..df92b6eec 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -199,94 +199,94 @@ def test_resolve_alt(): def test_resolve_context(): samplings_to_make = { "Resolvable.S::categorical__6": 5, - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__6": ( 3 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + 
"Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__6": ( 1 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__4": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical::categorical__4": ( 1 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical::categorical__6": ( 5 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + 
"Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__6": ( 0 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__4": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__4": ( 3 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__6": ( 4 ), - 
"Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__3": ( + 
"Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[2].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": ( + 
"Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical::categorical__6": ( 1 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[2].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[3].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[3].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[4].resampled_categorical::categorical__3": ( + 
"Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[4].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[5].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[5].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[6].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[6].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[7].resampled_categorical::categorical__3": ( + 
"Resolvable.S.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[7].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[1].resampled_categorical::categorical__6": ( 2 ), - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__6": ( 2 ), - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__6": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__6": ( 0 ), - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__4": ( + 
"Resolvable.S.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__4": ( 2 ), - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.sampled_value.resampled_operation.args[1].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_categorical.sampled_value.resampled_operation.args[1].resampled_categorical::categorical__6": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[1].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[1].resampled_categorical::categorical__6": ( 1 ), - "Resolvable.S.sampled_value.resampled_operation.args[2].resampled_categorical::categorical__3": ( + 
"Resolvable.S.sampled_value.resampled_operation.args.sequence[2].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.sampled_value.resampled_operation.args[3].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[3].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.sampled_value.resampled_operation.args[4].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[4].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.sampled_value.resampled_operation.args[5].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[5].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.S.sampled_value.resampled_operation.args[6].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[6].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.S.sampled_value.resampled_operation.args[7].resampled_categorical::categorical__3": ( + "Resolvable.S.sampled_value.resampled_operation.args.sequence[7].resampled_categorical::categorical__3": ( 1 ), } diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index d50f338cd..9b323268b 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -231,46 +231,46 @@ def test_hnas_like_string(): def test_hnas_like_context(): samplings_to_make = { - "Resolvable.CL.args[0].resampled_categorical::categorical__4": 3, - "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[0]::categorical__4": ( + "Resolvable.CL.args.sequence[0].resampled_categorical::categorical__4": 3, + 
"Resolvable.CL.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_operation.args.sequence[0]::categorical__4": ( 0 ), - "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[1]::categorical__3": ( + "Resolvable.CL.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_operation.args.sequence[1]::categorical__3": ( 2 ), - "Resolvable.CL.args[0].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": ( + "Resolvable.CL.args.sequence[0].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_operation.args.sequence[2].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.CL.args[1].resampled_categorical::categorical__4": 0, - "Resolvable.CL.args[2].resampled_categorical::categorical__4": 1, - "Resolvable.CL.args[3].resampled_categorical::categorical__4": 2, - "Resolvable.CL.args[4].resampled_categorical::categorical__4": 3, - "Resolvable.CL.args[4].resampled_categorical.sampled_value.resampled_operation.args[0].resampled_operation.args[2].resampled_categorical::categorical__3": ( + "Resolvable.CL.args.sequence[1].resampled_categorical::categorical__4": 0, + "Resolvable.CL.args.sequence[2].resampled_categorical::categorical__4": 1, + "Resolvable.CL.args.sequence[3].resampled_categorical::categorical__4": 2, + "Resolvable.CL.args.sequence[4].resampled_categorical::categorical__4": 3, + "Resolvable.CL.args.sequence[4].resampled_categorical.sampled_value.resampled_operation.args.sequence[0].resampled_operation.args.sequence[2].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.CL.args[5].resampled_categorical::categorical__4": 0, + "Resolvable.CL.args.sequence[5].resampled_categorical::categorical__4": 0, "Resolvable.ARCH::categorical__3": 1, - 
"Resolvable.ARCH.sampled_value.args[0].resampled_categorical::categorical__3": 2, - "Resolvable.ARCH.sampled_value.args[0].resampled_categorical.sampled_value.args[0].resampled_categorical::categorical__3": ( + "Resolvable.ARCH.sampled_value.args.sequence[0].resampled_categorical::categorical__3": 2, + "Resolvable.ARCH.sampled_value.args.sequence[0].resampled_categorical.sampled_value.args.sequence[0].resampled_categorical::categorical__3": ( 2 ), - "Resolvable.ARCH.sampled_value.args[0].resampled_categorical.sampled_value.args[1].resampled_categorical::categorical__3": ( + "Resolvable.ARCH.sampled_value.args.sequence[0].resampled_categorical.sampled_value.args.sequence[1].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.ARCH.sampled_value.args[1].resampled_categorical::categorical__3": 2, - "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[0].resampled_categorical::categorical__3": ( + "Resolvable.ARCH.sampled_value.args.sequence[1].resampled_categorical::categorical__3": 2, + "Resolvable.ARCH.sampled_value.args.sequence[1].resampled_categorical.sampled_value.args.sequence[0].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[1].resampled_categorical::categorical__3": ( + "Resolvable.ARCH.sampled_value.args.sequence[1].resampled_categorical.sampled_value.args.sequence[1].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[2].resampled_categorical::categorical__3": ( + "Resolvable.ARCH.sampled_value.args.sequence[1].resampled_categorical.sampled_value.args.sequence[2].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.ARCH.sampled_value.args[1].resampled_categorical.sampled_value.args[3].resampled_categorical::categorical__3": ( + 
"Resolvable.ARCH.sampled_value.args.sequence[1].resampled_categorical.sampled_value.args.sequence[3].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.ARCH.sampled_value.args[2].resampled_categorical::categorical__3": 2, + "Resolvable.ARCH.sampled_value.args.sequence[2].resampled_categorical::categorical__3": 2, } expected_cl_config_string = ( diff --git a/tests/test_neps_space/test_search_space__resampled.py b/tests/test_neps_space/test_search_space__resampled.py index ee16760a2..fc4fe3450 100644 --- a/tests/test_neps_space/test_search_space__resampled.py +++ b/tests/test_neps_space/test_search_space__resampled.py @@ -281,10 +281,6 @@ def test_resampled_categorical(): cell_args1 = cell.args[0] cell_args2 = cell.args[1] - cell.args[2] - cell.args[3] - cell.args[4] - cell.args[5] assert cell_args1 is op1 assert cell_args2 is op2 diff --git a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index 078f719d1..c5554236a 100644 --- a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -15,14 +15,18 @@ class ActPipelineSimple(PipelineSpace): - prelu = Operation( - operator="prelu", + prelu_with_args = Operation( + operator="prelu_with_args", + args=(0.1, 0.2), + ) + prelu_with_kwargs = Operation( + operator="prelu_with_kwargs", kwargs={"init": 0.1}, ) relu = Operation(operator="relu") act: Operation = Categorical( - choices=(prelu, relu), + choices=(prelu_with_args, prelu_with_kwargs, relu), ) @@ -130,17 +134,30 @@ def test_nested_simple(): resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) assert resolved_pipeline is not None - assert tuple(resolved_pipeline.get_attrs().keys()) == ("prelu", "relu", "act") + assert tuple(resolved_pipeline.get_attrs().keys()) == ( + "prelu_with_args", + "prelu_with_kwargs", + "relu", + "act", + ) - assert resolved_pipeline.prelu is pipeline.prelu 
+ assert resolved_pipeline.prelu_with_kwargs is pipeline.prelu_with_kwargs + assert resolved_pipeline.prelu_with_args is pipeline.prelu_with_args assert resolved_pipeline.relu is pipeline.relu + assert resolved_pipeline.act in ( + resolved_pipeline.prelu_with_kwargs, + resolved_pipeline.prelu_with_args, + resolved_pipeline.relu, + ) + @pytest.mark.repeat(50) def test_nested_simple_string(): possible_cell_config_strings = { "(relu)", - "(prelu {'init': 0.1})", + "(prelu_with_args (0.1) (0.2))", + "(prelu_with_kwargs {'init': 0.1})", } pipeline = ActPipelineSimple() @@ -393,7 +410,7 @@ def test_shared_complex_context(): samplings_to_make = { "Resolvable.op1::categorical__3": 2, "Resolvable.op2::categorical__3": 1, - "Resolvable.cell.kwargs{float_hp}::float__0.5_0.5_False": 0.5, + "Resolvable.cell.kwargs.mapping_value{float_hp}::float__0.5_0.5_False": 0.5, } pipeline = CellPipeline() From d6d28589b211a9f604ed2d988e5b053890b8e540 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Fri, 1 Aug 2025 22:44:31 +0200 Subject: [PATCH 054/156] Match above definition --- neps/space/neps_spaces/neps_space.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index f37fe974b..480d57648 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -700,7 +700,7 @@ def _resolve_sequence( self, resolvable_obj: tuple | list, context: SamplingResolutionContext, - ) -> list[Any] | tuple[Any]: + ) -> tuple[Any] | list[Any]: # The logic below is done so that if the original sequence # had only things that didn't need resolving, # we return the original object. 
From 0764f405f8dc1191ffb9347e4af2c0c2e5f10d4f Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Fri, 1 Aug 2025 22:45:31 +0200 Subject: [PATCH 055/156] Small changes --- neps/space/neps_spaces/neps_space.py | 8 ++++---- pyproject.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 480d57648..bb3e40c92 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -709,15 +709,15 @@ def _resolve_sequence( new_list = [] needed_resolving = False - for idx, item in enumerate(original_sequence): - resolved_item = self._resolve(item, f"sequence[{idx}]", context) + for idx, initial_item in enumerate(original_sequence): + resolved_item = self._resolve(initial_item, f"sequence[{idx}]", context) new_list.append(resolved_item) - needed_resolving = needed_resolving or (item is not resolved_item) + needed_resolving = needed_resolving or (initial_item is not resolved_item) result = original_sequence if needed_resolving: # We also want to return a result of the same type - # as the original received value. + # as the original received sequence. 
original_type = type(original_sequence) result = original_type(new_list) diff --git a/pyproject.toml b/pyproject.toml index 15383b4d4..78acfde3f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ classifiers = [ requires-python = ">=3.10,<3.14" dependencies = [ - "numpy", + "numpy>=2.0", "pandas>=2.0,<3.0", "networkx>=2.6.3,<3.0", "scipy>=1.13.1", From 69587c89850bd7deca113d235a4b8bea6ec27d03 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Sat, 2 Aug 2025 18:03:21 +0200 Subject: [PATCH 056/156] Fix imports --- tests/test_neps_space/utils.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/test_neps_space/utils.py b/tests/test_neps_space/utils.py index 9529c3c40..bf7420c3e 100644 --- a/tests/test_neps_space/utils.py +++ b/tests/test_neps_space/utils.py @@ -2,15 +2,14 @@ from collections.abc import Callable -import neps.space.neps_spaces.parameters from neps.space.neps_spaces import neps_space def generate_possible_config_strings( - pipeline: neps.space.neps_spaces.parameters.PipelineSpace, + pipeline: neps_space.PipelineSpace, resolved_pipeline_attr_getter: Callable[ - [neps.space.neps_spaces.parameters.PipelineSpace], - neps.space.neps_spaces.parameters.Operation, + [neps_space.PipelineSpace], + neps_space.Operation, ], num_resolutions: int = 50_000, ): From 6c1a230422917822d05cd996ac8fc4245dae3a16 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Sat, 2 Aug 2025 21:33:54 +0200 Subject: [PATCH 057/156] Add repeated resolvable type --- neps/space/neps_spaces/neps_space.py | 46 +++++++++++++++++-- neps/space/neps_spaces/parameters.py | 69 +++++++++++++++++++++++++++- 2 files changed, 111 insertions(+), 4 deletions(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index bb3e40c92..0c1d3f1ca 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -24,7 
+24,8 @@ PipelineSpace, Resampled, Resolvable, - _Lazy, + Repeated, + Lazy, ) from neps.space.neps_spaces.sampling import ( DomainSampler, @@ -477,7 +478,7 @@ def _( choice_provider_choices = choice_provider_final_attrs["choices"] if isinstance(choice_provider_choices, (tuple, list)): choice_provider_choices = tuple( - _Lazy(content=choice) for choice in choice_provider_choices + Lazy(content=choice) for choice in choice_provider_choices ) choice_provider_final_attrs["choices"] = choice_provider_choices choice_provider_adjusted = initial_attr_value.from_attrs( @@ -645,7 +646,39 @@ def _( @_resolver_dispatch.register def _( self, - resolvable_obj: _Lazy, + repeated_resolvable_obj: Repeated, + context: SamplingResolutionContext, + ) -> tuple[Any]: + if context.was_already_resolved(repeated_resolvable_obj): + return context.get_resolved(repeated_resolvable_obj) + + # First figure out how many times we need to resolvable repeated, + # then do that many resolves of that object. + # It does not matter what type the content is. + # Return all the results as a tuple. + + unresolved_count = repeated_resolvable_obj.count + resolved_count = self._resolve(unresolved_count, "repeat_count", context) + + if not isinstance(resolved_count, int): + raise ValueError( + f"The resolved count value for {repeated_resolvable_obj!r} is not an int." + f" Resolved to {resolved_count!r}" + ) + + obj_to_repeat = repeated_resolvable_obj.content + result = [] + for i in range(resolved_count): + result.append(self._resolve(obj_to_repeat, f"repeated_item[{i}]", context)) + result = tuple(result) + + context.add_resolved(repeated_resolvable_obj, result) + return result + + @_resolver_dispatch.register + def _( + self, + resolvable_obj: Lazy, context: SamplingResolutionContext, # noqa: ARG002 ) -> Any: # When resolving a lazy resolvable, @@ -653,6 +686,7 @@ def _( # The purpose of the lazy resolvable is to stop # the resolver from going deeper into the process. 
# In this case, to stop the resolution of `resolvable_obj.content`. + # No need to add it in the resolved cache. return resolvable_obj.content @_resolver_dispatch.register @@ -678,6 +712,9 @@ def _( if needed_resolving: result = new_dict + # IMPORTANT: Dicts are not stored in the resolved cache. + # Otherwise, we won't go inside them the next time + # and will ignore any resampled things inside. return result @_resolver_dispatch.register @@ -721,6 +758,9 @@ def _resolve_sequence( original_type = type(original_sequence) result = original_type(new_list) + # IMPORTANT: Sequences are not stored in the resolved cache. + # Otherwise, we won't go inside them the next time + # and will ignore any resampled things inside. return result @_resolver_dispatch.register diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 0ce258f7d..c2cb1e1ea 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -1103,7 +1103,67 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: return self._source.from_attrs(attrs) -class _Lazy(Resolvable): +class Repeated(Resolvable): + """A class representing a sequence where a resolvable + is repeated a variable number of times. + + Attributes: + count: The count how many times the content should be repeated. + content: The content which will be repeated. + """ + + def __init__( + self, + count: int | Domain[int] | Resolvable, + content: Resolvable | Any, + ): + if isinstance(count, int) and count < 0: + raise ValueError( + f"The received repeat count is negative. Received {count!r}" + ) + + self._count = count + self._content = content + + @property + def count(self) -> int | Domain[int] | Resolvable: + """Get the count how many times the content should be repeated. + + Returns: + The count how many times the content will be repeated. + """ + return self._count + + @property + def content(self) -> Resolvable | Any: + """Get the content which will be repeated. 
+ + Returns: + The content which will be repeated. + """ + return self._content + + def get_attrs(self) -> Mapping[str, Any]: + """Get the attributes of the resolvable as a mapping. + + Returns: + A mapping of attribute names to their values. + """ + return {"count": self.count, "content": self.content} + + def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: + """Create a new resolvable object from the given attributes. + + Args: + attrs: A mapping of attribute names to their values. + + Returns: + A new resolvable object created from the specified attributes. + """ + return Repeated(count=attrs["count"], content=attrs["content"]) + + +class Lazy(Resolvable): """A class representing a lazy operation in a NePS space. The purpose is to have the resolution process @@ -1155,3 +1215,10 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: raise ValueError( f"This is a lazy resolvable. Can't create object for it: {self.content!r}." ) + +# TODO [lum]: all the `get_attrs` and `from_attrs` MUST NOT raise. +# They should return the best representation of themselves that they can. +# This is because all resolvable objects can be nested content of other +# resolvable objects that in general will interact with them +# through these two methods. +# When they raise, then the traversal will not be possible. From ab656f271bd3a6f5e088b47629b153a707cc5a60 Mon Sep 17 00:00:00 2001 From: Lum Birinxhiku <8531585+lumib@users.noreply.github.com> Date: Sat, 2 Aug 2025 21:52:04 +0200 Subject: [PATCH 058/156] Add TODOs --- neps/space/neps_spaces/neps_space.py | 4 ++++ neps/space/neps_spaces/parameters.py | 6 +++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 0c1d3f1ca..a78c45569 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -712,6 +712,8 @@ def _( if needed_resolving: result = new_dict + # TODO: [lum] reconsider this below. 
We likely should cache them, + # similarly to other things. # IMPORTANT: Dicts are not stored in the resolved cache. # Otherwise, we won't go inside them the next time # and will ignore any resampled things inside. @@ -758,6 +760,8 @@ def _resolve_sequence( original_type = type(original_sequence) result = original_type(new_list) + # TODO: [lum] reconsider this below. We likely should cache them, + # similarly to other things. # IMPORTANT: Sequences are not stored in the resolved cache. # Otherwise, we won't go inside them the next time # and will ignore any resampled things inside. diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index c2cb1e1ea..d35298945 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -1021,6 +1021,10 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: return type(self)(**attrs) +# TODO: [lum] For tuples, lists and dicts, +# should we make the behavior similar to other resolvables, +# in that they will be cached and then we also need to use Resampled for them? + class Resampled(Resolvable): """A class representing a resampling operation in a NePS space. @@ -1216,7 +1220,7 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: f"This is a lazy resolvable. Can't create object for it: {self.content!r}." ) -# TODO [lum]: all the `get_attrs` and `from_attrs` MUST NOT raise. +# TODO: [lum] all the `get_attrs` and `from_attrs` MUST NOT raise. # They should return the best representation of themselves that they can. 
# This is because all resolvable objects can be nested content of other # resolvable objects that in general will interact with them From 54f019dfa5e897fce6c9bb21ec3815b0d1d8a6d8 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 4 Aug 2025 12:40:26 +0200 Subject: [PATCH 059/156] Add inc_ratio parameter to _neps_bracket_optimizer and NePSPriorBandSampler for configurable incumbent ratio --- neps/optimizers/algorithms.py | 4 ++++ neps/optimizers/neps_priorband.py | 7 +++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 7c6f90697..32cb2cf9d 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -1444,6 +1444,7 @@ def _neps_bracket_optimizer( sampler: Literal["priorband"], sample_prior_first: bool | Literal["highest_fidelity"], early_stopping_rate: int | None, + inc_ratio: float = 0.9, ) -> _NePSBracketOptimizer: fidelity_attrs = pipeline_space.fidelity_attrs @@ -1527,6 +1528,7 @@ def _neps_bracket_optimizer( early_stopping_rate if early_stopping_rate is not None else 0 ), fid_bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + inc_ratio=inc_ratio, ) case _: raise ValueError(f"Unknown sampler: {sampler}") @@ -1544,6 +1546,7 @@ def _neps_bracket_optimizer( def neps_priorband( pipeline_space: PipelineSpace, *, + inc_ratio: float = 0.9, eta: int = 3, sample_prior_first: bool | Literal["highest_fidelity"] = False, base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", @@ -1587,6 +1590,7 @@ def neps_priorband( sampler="priorband", sample_prior_first=sample_prior_first, early_stopping_rate=0 if base in ("successive_halving", "asha") else None, + inc_ratio=inc_ratio, ) diff --git a/neps/optimizers/neps_priorband.py b/neps/optimizers/neps_priorband.py index 2059d053a..16ea26e17 100644 --- a/neps/optimizers/neps_priorband.py +++ b/neps/optimizers/neps_priorband.py @@ -39,6 +39,9 @@ class NePSPriorBandSampler: fid_bounds: tuple[int, 
int] | tuple[float, float] """The fidelity bounds.""" + inc_ratio: float = 0.9 + """The ratio of the incumbent (vs. prior) in the sampling distribution.""" + def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: """Sample a configuration based on the PriorBand algorithm. @@ -121,8 +124,8 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: # TODO: [lum]: Here I am simply using fixed values. # Will maybe have to come up with a way to approximate the pdf for the top # configs. - inc_ratio = 0.9 - prior_ratio = 0.1 + inc_ratio = self.inc_ratio + prior_ratio = 1 - self.inc_ratio # 4. And finally, we distribute the original w_prior according to this ratio w_inc = w_prior * inc_ratio From cddc7471bb97d3e09da6ebb331d1f23781b6e1f1 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 7 Sep 2025 17:05:39 +0200 Subject: [PATCH 060/156] Refactor condition checks in convert_configspace to ensure attributes exist before evaluating conditions --- neps/space/neps_spaces/neps_space.py | 30 ++++++++++++---------------- neps/space/neps_spaces/parameters.py | 22 +++++++++++++++----- neps/space/parsing.py | 13 ++++++------ 3 files changed, 37 insertions(+), 28 deletions(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index a78c45569..082688caa 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -20,12 +20,12 @@ Fidelity, Float, Integer, + Lazy, Operation, PipelineSpace, + Repeated, Resampled, Resolvable, - Repeated, - Lazy, ) from neps.space.neps_spaces.sampling import ( DomainSampler, @@ -455,20 +455,16 @@ def _( # in the context that resolved value for the original object. # The original object can possibly be reused elsewhere. 
- if ( - isinstance(initial_attr_value, Categorical) - and context.was_already_resolved(initial_attr_value) - ): + if isinstance( + initial_attr_value, Categorical + ) and context.was_already_resolved(initial_attr_value): # Before making adjustments, we make sure we haven't # already chosen a value for the provider. # Otherwise, we already have the final answer for it. resolved_attr_value = context.get_resolved(initial_attr_value) - elif ( - isinstance(initial_attr_value, Categorical) or - ( - isinstance(initial_attr_value, Resampled) - and isinstance(initial_attr_value.source, Categorical) - ) + elif isinstance(initial_attr_value, Categorical) or ( + isinstance(initial_attr_value, Resampled) + and isinstance(initial_attr_value.source, Categorical) ): # We have a previously unseen provider. # Create a new object where the choices are lazy, @@ -476,7 +472,7 @@ def _( choice_provider_final_attrs = {**initial_attr_value.get_attrs()} choice_provider_choices = choice_provider_final_attrs["choices"] - if isinstance(choice_provider_choices, (tuple, list)): + if isinstance(choice_provider_choices, tuple | list): choice_provider_choices = tuple( Lazy(content=choice) for choice in choice_provider_choices ) @@ -670,10 +666,10 @@ def _( result = [] for i in range(resolved_count): result.append(self._resolve(obj_to_repeat, f"repeated_item[{i}]", context)) - result = tuple(result) + result = tuple(result) # type: ignore[assignment] context.add_resolved(repeated_resolvable_obj, result) - return result + return result # type: ignore[return-value] @_resolver_dispatch.register def _( @@ -725,7 +721,7 @@ def _( resolvable_obj: tuple, context: SamplingResolutionContext, ) -> tuple[Any]: - return self._resolve_sequence(resolvable_obj, context) + return self._resolve_sequence(resolvable_obj, context) # type: ignore[return-value] @_resolver_dispatch.register def _( @@ -733,7 +729,7 @@ def _( resolvable_obj: list, context: SamplingResolutionContext, ) -> list[Any]: - return 
self._resolve_sequence(resolvable_obj, context) + return self._resolve_sequence(resolvable_obj, context) # type: ignore[return-value] def _resolve_sequence( self, diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index d35298945..b33fd63bd 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -424,7 +424,7 @@ def __init__( for choice in self._choices ) else: - self._choices = choices + self._choices = choices # type: ignore[assignment] self._prior = prior self._prior_confidence = ( convert_confidence_level(prior_confidence) @@ -1025,6 +1025,7 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: # should we make the behavior similar to other resolvables, # in that they will be cached and then we also need to use Resampled for them? + class Resampled(Resolvable): """A class representing a resampling operation in a NePS space. @@ -1121,10 +1122,14 @@ def __init__( count: int | Domain[int] | Resolvable, content: Resolvable | Any, ): + """Initialize the Repeated object with a count and content. + + Args: + count: The count how many times the content should be repeated. + content: The content which will be repeated. + """ if isinstance(count, int) and count < 0: - raise ValueError( - f"The received repeat count is negative. Received {count!r}" - ) + raise ValueError(f"The received repeat count is negative. Received {count!r}") self._count = count self._content = content @@ -1180,6 +1185,12 @@ class Lazy(Resolvable): """ def __init__(self, content: Resolvable | tuple[Any] | str): + """Initialize the Lazy object with content. + + Args: + content: The content being held, which can be a resolvable object + or a tuple or a string. + """ self._content = content @property @@ -1202,7 +1213,7 @@ def get_attrs(self) -> Mapping[str, Any]: f"This is a lazy resolvable. Can't get attrs from it: {self.content!r}." 
) - def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: + def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: # noqa: ARG002 """Create a new resolvable object from the given attributes. Args: @@ -1220,6 +1231,7 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: f"This is a lazy resolvable. Can't create object for it: {self.content!r}." ) + # TODO: [lum] all the `get_attrs` and `from_attrs` MUST NOT raise. # They should return the best representation of themselves that they can. # This is because all resolvable objects can be nested content of other diff --git a/neps/space/parsing.py b/neps/space/parsing.py index 186421f78..2648a6faa 100644 --- a/neps/space/parsing.py +++ b/neps/space/parsing.py @@ -217,7 +217,7 @@ def convert_mapping(pipeline_space: Mapping[str, Any]) -> SearchSpace: return SearchSpace(parameters) -def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: +def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: # noqa: C901 """Constructs a [`SearchSpace`][neps.space.SearchSpace] from a [`ConfigurationSpace`](https://automl.github.io/ConfigSpace/latest/). @@ -230,11 +230,12 @@ def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: import ConfigSpace as CS space: dict[str, Parameter | HPOConstant] = {} - if any(configspace.conditions) or any(configspace.forbidden_clauses): - raise NotImplementedError( - "The ConfigurationSpace has conditions or forbidden clauses, " - "which are not supported by neps." - ) + if hasattr(configspace, "conditions") and hasattr(configspace, "forbidden_clauses"): # noqa: SIM102 + if any(configspace.conditions) or any(configspace.forbidden_clauses): + raise NotImplementedError( + "The ConfigurationSpace has conditions or forbidden clauses, " + "which are not supported by neps." 
+ ) for name, hyperparameter in configspace.items(): match hyperparameter: From 755723e912a158fad1d09ab2357e9102df001d08 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 7 Sep 2025 18:54:18 +0200 Subject: [PATCH 061/156] Add convert_classic_to_neps_search_space function to convert classic SearchSpace to NePS PipelineSpace --- neps/space/neps_spaces/neps_space.py | 70 ++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 082688caa..e2bfb67c3 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -15,6 +15,7 @@ from neps.optimizers import algorithms, optimizer from neps.space.neps_spaces import config_string from neps.space.neps_spaces.parameters import ( + _UNSET, Categorical, Domain, Fidelity, @@ -1169,6 +1170,75 @@ def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | return None +def convert_classic_to_neps_search_space( + space: SearchSpace, +) -> PipelineSpace: + """Convert a classic SearchSpace to a NePS PipelineSpace if possible. + This function converts a classic SearchSpace to a NePS PipelineSpace. + + Args: + space: The classic SearchSpace to convert. + + Returns: + A NePS PipelineSpace. 
+ """ + + class NEPSSpace(PipelineSpace): + """A NePS-specific PipelineSpace.""" + + for parameter_name, parameter in space.elements.items(): + if isinstance(parameter, neps.HPOCategorical): + setattr( + NEPSSpace, + parameter_name, + Categorical( + choices=tuple(parameter.choices), + prior=( + parameter.choices.index(parameter.prior) + if parameter.prior + else _UNSET + ), + prior_confidence=( + parameter.prior_confidence + if parameter.prior_confidence + else _UNSET + ), + ), + ) + elif isinstance(parameter, neps.HPOConstant): + setattr(NEPSSpace, parameter_name, parameter.value) + elif isinstance(parameter, neps.HPOInteger): + new_integer = Integer( + min_value=parameter.lower, + max_value=parameter.upper, + prior=parameter.prior if parameter.prior else _UNSET, + prior_confidence=( + parameter.prior_confidence if parameter.prior_confidence else _UNSET + ), + ) + setattr( + NEPSSpace, + parameter_name, + (Fidelity(domain=new_integer) if parameter.is_fidelity else new_integer), + ) + elif isinstance(parameter, neps.HPOFloat): + new_float = Float( + min_value=parameter.lower, + max_value=parameter.upper, + prior=parameter.prior if parameter.prior else _UNSET, + prior_confidence=( + parameter.prior_confidence if parameter.prior_confidence else _UNSET + ), + ) + setattr( + NEPSSpace, + parameter_name, + (Fidelity(domain=new_float) if parameter.is_fidelity else new_float), + ) + + return NEPSSpace() + + def check_neps_space_compatibility( optimizer_to_check: ( algorithms.OptimizerChoice From 0e5f23485a1b5d1e3bbfcfba36541eb24dd66848 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 7 Sep 2025 19:05:16 +0200 Subject: [PATCH 062/156] Enhance compatibility check in run function to ensure pipeline_space is a PipelineSpace instance before conversion --- neps/api.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/neps/api.py b/neps/api.py index f63366249..a71a620c9 100644 --- a/neps/api.py +++ b/neps/api.py @@ -363,7 +363,11 @@ def __call__( # If the 
optimizer is not a NEPS algorithm, we try to convert the pipeline_space neps_classic_space_compatibility = check_neps_space_compatibility(optimizer) - if neps_classic_space_compatibility in ["both", "classic"] and not warmstart_configs: + if ( + neps_classic_space_compatibility in ["both", "classic"] + and isinstance(pipeline_space, PipelineSpace) + and not warmstart_configs + ): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space: pipeline_space = converted_space From cd8c389bb78c909ee4d4a243945daf44561f0ed3 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 7 Sep 2025 22:14:54 +0200 Subject: [PATCH 063/156] Add log parameter to Integer and Float conversions in convert_classic_to_neps_search_space --- neps/space/neps_spaces/neps_space.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index e2bfb67c3..9213281d0 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -1211,6 +1211,7 @@ class NEPSSpace(PipelineSpace): new_integer = Integer( min_value=parameter.lower, max_value=parameter.upper, + log=parameter.log, prior=parameter.prior if parameter.prior else _UNSET, prior_confidence=( parameter.prior_confidence if parameter.prior_confidence else _UNSET @@ -1225,6 +1226,7 @@ class NEPSSpace(PipelineSpace): new_float = Float( min_value=parameter.lower, max_value=parameter.upper, + log=parameter.log, prior=parameter.prior if parameter.prior else _UNSET, prior_confidence=( parameter.prior_confidence if parameter.prior_confidence else _UNSET From d4437c07128036e5a431981dcdaa032334fbee27 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 8 Sep 2025 00:05:15 +0200 Subject: [PATCH 064/156] Add string representation method to PipelineSpace class --- neps/space/neps_spaces/parameters.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/neps/space/neps_spaces/parameters.py 
b/neps/space/neps_spaces/parameters.py index b33fd63bd..2022eb942 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -188,6 +188,20 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> PipelineSpace: setattr(new_pipeline, name, value) return new_pipeline + def __str__(self) -> str: + """Get a string representation of the pipeline. + + Returns: + A string representation of the pipeline, including its class name and + attributes. + """ + attrs = "\n\t".join( + f"{k} = {v!r}" + for k, v in self.get_attrs().items() + if not k.startswith("_") and not callable(v) + ) + return f"PipelineSpace {self.__class__.__name__} with parameters:\n\t{attrs}" + class ConfidenceLevel(enum.Enum): """Enum representing confidence levels for sampling.""" From a443002a0dc61be9947f9908121995a3bf45be71 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 8 Sep 2025 00:15:52 +0200 Subject: [PATCH 065/156] Remove redundant check for 'auto' in compatibility verification --- neps/space/neps_spaces/neps_space.py | 1 - 1 file changed, 1 deletion(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 9213281d0..4b8172810 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -1296,7 +1296,6 @@ def check_neps_space_compatibility( algorithms.complex_random_search, ) ) - or optimizer_to_check == "auto" or ( optimizer_to_check[0] in ( From 9f8613f5b66dfaec3e9a1c9bd47651e3d2e8d771 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 8 Sep 2025 00:49:29 +0200 Subject: [PATCH 066/156] Add string representation methods for Fidelity, Categorical, Float, Integer, Operation, and Resampled classes --- neps/space/neps_spaces/parameters.py | 50 +++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 2022eb942..fd3e42492 100644 --- a/neps/space/neps_spaces/parameters.py +++ 
b/neps/space/neps_spaces/parameters.py @@ -82,6 +82,12 @@ def __init__(self, domain: Integer | Float): ) self._domain = domain + def __str__(self) -> str: + """Get a string representation of the fidelity.""" + return f"Fidelity({ + self._domain.__str__() if hasattr(self._domain, '__str__') else self._domain!r + })" + @property def min_value(self) -> int | float: """Get the minimum value of the fidelity domain. @@ -196,7 +202,7 @@ def __str__(self) -> str: attributes. """ attrs = "\n\t".join( - f"{k} = {v!r}" + f"{k} = {v.__str__() if hasattr(v, '__str__') else v!r}" for k, v in self.get_attrs().items() if not k.startswith("_") and not callable(v) ) @@ -450,6 +456,18 @@ def __init__( "If prior is set, prior_confidence must also be set to a valid value." ) + def __str__(self) -> str: + """Get a string representation of the categorical domain.""" + string = f"Categorical(choices={ + self._choices.__str__() + if hasattr(self._choices, '__str__') + else self._choices + }" + if self.has_prior: + string += f", prior={self._prior}, prior_confidence={self._prior_confidence}" + string += ")" + return string + @property def min_value(self) -> int: """Get the minimum value of the categorical domain. @@ -626,6 +644,16 @@ def __init__( "If prior is set, prior_confidence must also be set to a valid value." ) + def __str__(self) -> str: + """Get a string representation of the floating-point domain.""" + string = f"Float({self._min_value}, {self._max_value}" + if self._log: + string += ", log" + if self.has_prior: + string += f", prior={self._prior}, prior_confidence={self._prior_confidence}" + string += ")" + return string + @property def min_value(self) -> float: """Get the minimum value of the floating-point domain. @@ -800,6 +828,16 @@ def __init__( "If prior is set, prior_confidence must also be set to a valid value." 
) + def __str__(self) -> str: + """Get a string representation of the integer domain.""" + string = f"Integer({self._min_value}, {self._max_value}" + if self._log: + string += ", log" + if self.has_prior: + string += f", prior={self._prior}, prior_confidence={self._prior_confidence}" + string += ")" + return string + @property def min_value(self) -> int: """Get the minimum value of the integer domain. @@ -969,6 +1007,13 @@ def __init__( else: self._kwargs = kwargs + def __str__(self) -> str: + """Get a string representation of the operation.""" + return ( + f"Operation(operator={self._operator!r}, args={self._args!r}," + f" kwargs={self._kwargs!r})" + ) + @property def operator(self) -> Callable | str: """Get the operator of the operation. @@ -1056,6 +1101,9 @@ def __init__(self, source: Resolvable | str): """ self._source = source + def __str__(self) -> str: + return f"Resampled({self._source!r})" + @property def source(self) -> Resolvable | str: """Get the source of the resampling. From 85ffe02c0373610c67905551e68300cfce2bab0e Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 8 Sep 2025 00:58:16 +0200 Subject: [PATCH 067/156] Simplify string representation method in Fidelity class --- neps/space/neps_spaces/parameters.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index fd3e42492..e016ee01d 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -84,9 +84,7 @@ def __init__(self, domain: Integer | Float): def __str__(self) -> str: """Get a string representation of the fidelity.""" - return f"Fidelity({ - self._domain.__str__() if hasattr(self._domain, '__str__') else self._domain!r - })" + return f"Fidelity({self._domain.__str__()})" @property def min_value(self) -> int | float: @@ -202,7 +200,7 @@ def __str__(self) -> str: attributes. 
""" attrs = "\n\t".join( - f"{k} = {v.__str__() if hasattr(v, '__str__') else v!r}" + f"{k} = {v!s}" for k, v in self.get_attrs().items() if not k.startswith("_") and not callable(v) ) @@ -1010,8 +1008,8 @@ def __init__( def __str__(self) -> str: """Get a string representation of the operation.""" return ( - f"Operation(operator={self._operator!r}, args={self._args!r}," - f" kwargs={self._kwargs!r})" + f"Operation(operator={self._operator!s}, args={self._args!s}," + f" kwargs={self._kwargs!s})" ) @property @@ -1102,7 +1100,7 @@ def __init__(self, source: Resolvable | str): self._source = source def __str__(self) -> str: - return f"Resampled({self._source!r})" + return f"Resampled({self._source!s})" @property def source(self) -> Resolvable | str: From d3e76dc411b45c169d5eb730ffcde60c3962da31 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 8 Sep 2025 00:59:57 +0200 Subject: [PATCH 068/156] Simplify string representation method in Categorical class --- neps/space/neps_spaces/parameters.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index e016ee01d..f223f5fa5 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -456,11 +456,7 @@ def __init__( def __str__(self) -> str: """Get a string representation of the categorical domain.""" - string = f"Categorical(choices={ - self._choices.__str__() - if hasattr(self._choices, '__str__') - else self._choices - }" + string = f"Categorical(choices={self._choices!s}" if self.has_prior: string += f", prior={self._prior}, prior_confidence={self._prior_confidence}" string += ")" From 10b5080b90fe1341f8c60b819ec8301783edebb2 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 8 Sep 2025 18:47:28 +0200 Subject: [PATCH 069/156] Add conversion logic for classic to NEPS search space in API --- neps/api.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/neps/api.py 
b/neps/api.py index a71a620c9..5749cd0c1 100644 --- a/neps/api.py +++ b/neps/api.py @@ -21,6 +21,7 @@ NepsCompatConverter, adjust_evaluation_pipeline_for_neps_space, check_neps_space_compatibility, + convert_classic_to_neps_search_space, convert_neps_to_classic_search_space, ) from neps.space.neps_spaces.parameters import PipelineSpace @@ -41,7 +42,7 @@ logger = logging.getLogger(__name__) -def run( # noqa: PLR0913, C901 +def run( # noqa: PLR0913, PLR0912, C901 evaluate_pipeline: Callable[..., EvaluatePipelineReturn] | str, pipeline_space: ConfigurationSpace | PipelineSpace, *, @@ -372,6 +373,11 @@ def __call__( if converted_space: pipeline_space = converted_space + if neps_classic_space_compatibility == "neps" and not isinstance( + pipeline_space, PipelineSpace + ): + pipeline_space = convert_classic_to_neps_search_space(pipeline_space) + # Optimizer check, if the search space is a Pipeline and the optimizer is not a NEPS # algorithm, we raise an error, as the optimizer is not compatible. if ( From 6fe13c93dbed400e36d420ec9dc448e675600b91 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 8 Sep 2025 18:53:17 +0200 Subject: [PATCH 070/156] Refactor run function to use 'space' variable for consistency in compatibility checks and conversions --- neps/api.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/neps/api.py b/neps/api.py index 5749cd0c1..1ec9152ed 100644 --- a/neps/api.py +++ b/neps/api.py @@ -373,30 +373,28 @@ def __call__( if converted_space: pipeline_space = converted_space + space = convert_to_space(pipeline_space) + if neps_classic_space_compatibility == "neps" and not isinstance( - pipeline_space, PipelineSpace + space, PipelineSpace ): - pipeline_space = convert_classic_to_neps_search_space(pipeline_space) + space = convert_classic_to_neps_search_space(space) # Optimizer check, if the search space is a Pipeline and the optimizer is not a NEPS # algorithm, we raise an error, as the optimizer is not compatible. 
- if ( - isinstance(pipeline_space, PipelineSpace) - and neps_classic_space_compatibility == "classic" - ): + if isinstance(space, PipelineSpace) and neps_classic_space_compatibility == "classic": raise ValueError( "The provided optimizer is not compatible with this complex search space. " "Please use one that is, such as 'random_search', " "'priorband', or 'complex_random_search'." ) - if isinstance(pipeline_space, PipelineSpace): + if isinstance(space, PipelineSpace): assert not isinstance(evaluate_pipeline, str) evaluate_pipeline = adjust_evaluation_pipeline_for_neps_space( - evaluate_pipeline, pipeline_space + evaluate_pipeline, space ) - space = convert_to_space(pipeline_space) _optimizer_ask, _optimizer_info = load_optimizer(optimizer=optimizer, space=space) _eval: Callable From 0d9fafc6e6f78fa9e007714cf474042061abb499 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 21 Sep 2025 19:08:34 +0200 Subject: [PATCH 071/156] Update NePS Spaces documentation to clarify usage of pipeline_space in neps.run() and enhance fidelity parameter explanation --- docs/reference/neps_spaces.md | 40 +++++++++++++++++------------------ 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 3ca6c4b90..e15dd55c6 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -23,41 +23,41 @@ class MySpace(neps.PipelineSpace): cat_param = neps.Categorical(choices=("A", "B", "C")) ``` -!!! tip "**[Fidelity](../reference/search_algorithms/landing_page_algo.md#what-is-multi-fidelity-optimization) Parameters**" +!!! 
info "Using **NePS Spaces**" - Passing a [`neps.Integer`][neps.space.neps_spaces.parameters.Integer] or [`neps.Float`][neps.space.neps_spaces.parameters.Float] to a [`neps.Fidelity`][neps.space.neps_spaces.parameters.Fidelity] allows you to employ multi-fidelity optimization strategies, which can significantly speed up the optimization process by evaluating configurations at different fidelities (e.g., training for fewer epochs): + To search a **NePS space**, pass it as the `pipeline_space` argument to the `neps.run()` function: ```python - epochs = neps.Fidelity(neps.Integer(1, 16)) + neps.run( + ..., + pipeline_space=MySpace() + ) ``` - For more details on how to use fidelity parameters, see the [Multi-Fidelity](../reference/search_algorithms/landing_page_algo.md#what-is-multi-fidelity-optimization) section. + For more details on how to use the `neps.run()` function, see the [NePS Run Reference](../reference/neps_run.md). -!!! tip "**Using your knowledge, providing a [Prior](../reference/search_algorithms/landing_page_algo.md#what-are-priors)**" +### Using cheap approximation, providing a [**Fidelity**](../reference/search_algorithms/landing_page_algo.md#what-is-multi-fidelity-optimization) Parameter - You can provide **your knowledge about where a good value for this parameter lies** by indicating a `prior=`. 
You can also specify a `prior_confidence=` to indicate how strongly you want NePS to focus on these, one of either `"low"`, `"medium"`, or `"high"`: +Passing a [`neps.Integer`][neps.space.neps_spaces.parameters.Integer] or [`neps.Float`][neps.space.neps_spaces.parameters.Float] to a [`neps.Fidelity`][neps.space.neps_spaces.parameters.Fidelity] allows you to employ multi-fidelity optimization strategies, which can significantly speed up the optimization process by evaluating configurations at different fidelities (e.g., training for fewer epochs): - ```python - # Here "A" is used as a prior, indicated by its index 0 - cat_with_prior = neps.Categorical(choices=("A", "B", "C"), prior=0, prior_confidence="high") - ``` +```python +epochs = neps.Fidelity(neps.Integer(1, 16)) +``` - For more details on how to use priors, see the [Priors](../reference/search_algorithms/landing_page_algo.md#what-are-priors) section. +For more details on how to use fidelity parameters, see the [Multi-Fidelity](../reference/search_algorithms/landing_page_algo.md#what-is-multi-fidelity-optimization) section. -## 2. Using **NePS Spaces** +### Using your knowledge, providing a [**Prior**](../reference/search_algorithms/landing_page_algo.md#what-are-priors) -To search a **NePS space**, pass it as the `pipeline_space` argument to the `neps.run()` function: +You can provide **your knowledge about where a good value for this parameter lies** by indicating a `prior=`. You can also specify a `prior_confidence=` to indicate how strongly you want NePS to focus on these, one of either `"low"`, `"medium"`, or `"high"`: ```python -neps.run( - ..., - pipeline_space=MySpace() -) +# Here "A" is used as a prior, indicated by its index 0 +cat_with_prior = neps.Categorical(choices=("A", "B", "C"), prior=0, prior_confidence="high") ``` -For more details on how to use the `neps.run()` function, see the [NePS Run Reference](../reference/neps_run.md). 
+For more details on how to use priors, see the [Priors](../reference/search_algorithms/landing_page_algo.md#what-are-priors) section. -## 3. Architectures +## 3. Constructing Architecture Spaces Additionally, **NePS spaces** can describe **complex (hierarchical) architectures** using: @@ -109,7 +109,7 @@ This can be used for efficient architecture search by defining cells and blocks - [`Complex Random Search`][neps.optimizers.algorithms.complex_random_search], which can sample the space uniformly at random, using priors and mutating previously sampled configurations - [`PriorBand`][neps.optimizers.algorithms.priorband], which uses [multi-fidelity](./search_algorithms/multifidelity.md) and the prior knowledge encoded in the NePS space -## 4. General Structures +## 4. Constructing Complex Spaces Until now all parameters are sampled once and their value used for all occurrences. This section describes how to resample parameters in different contexts using: From adfefe1b31b96ae2ac6d355488a71fddc014277a Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 23 Sep 2025 18:03:20 +0200 Subject: [PATCH 072/156] feat: Enhance NePS Spaces with new parameter addition and removal methods --- docs/reference/neps_spaces.md | 32 +++++- neps/space/neps_spaces/parameters.py | 162 ++++++++++++++++++++++++++- 2 files changed, 190 insertions(+), 4 deletions(-) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 42c668ccc..faf753ecc 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -87,6 +87,8 @@ Operation also allow for (keyword-)arguments to be defined, including other para ```python + batch_size = neps.Categorical(choices=(16, 32, 64)) + _layer_size = neps.Integer(min_value=80, max_value=100) hidden_layer = neps.Operation( @@ -100,6 +102,27 @@ Operation also allow for (keyword-)arguments to be defined, including other para ``` This can be used for efficient architecture search by defining cells and blocks of operations, that 
make up a neural network. +The `evaluate_pipeline` function will receive the sampled operations as Callables, which can be used to instantiate the model: + +```python +def evaluate_pipeline( + activation_function: torch.nn.Module, + batch_size: int, + hidden_layer: torch.nn.Linear): + + # Instantiate the model using the sampled operations + model = torch.nn.Sequential( + torch.nn.Flatten(), + hidden_layer, + activation_function, + torch.nn.Linear(in_features=hidden_layer.out_features, out_features=10) + ) + + # Use the model for training and return the validation accuracy + model.train(batch_size=batch_size, ...) + return model.evaluate(...).accuracy + +``` ??? abstract "Structural Space-compatible optimizers" @@ -118,6 +141,7 @@ Until now all parameters are sampled once and their value used for all occurrenc With `neps.Resampled` you can reuse a parameter, even themselves recursively, but with a new value each time: ```python +class ResampledSpace(neps.PipelineSpace): float_param = neps.Float(min_value=0, max_value=1) # The resampled parameter will have the same range but will be sampled @@ -128,11 +152,12 @@ With `neps.Resampled` you can reuse a parameter, even themselves recursively, bu This is especially useful for defining complex architectures, where e.g. a cell block is defined and then resampled multiple times to create a neural network architecture: ```python - +class CNN_Space(neps.PipelineSpace): _kernel_size = neps.Integer(min_value=5, max_value=8) # Define a cell block that can be resampled # It will resample a new kernel size from _kernel_size each time + # Each instance will be identically but independently sampled _cell_block = neps.Operation( operator=torch.nn.Conv2d, kwargs={"kernel_size": neps.Resampled(source=_kernel_size)} @@ -144,6 +169,11 @@ This is especially useful for defining complex architectures, where e.g. 
a cell neps.Resampled(_cell_block), neps.Resampled(_cell_block), ) + +def evaluate_pipeline(cnn: torch.nn.Module): + # Use the cnn model for training and return the validation accuracy + cnn.train(...) + return cnn.evaluate(...).accuracy ``` ??? info "Self- and future references" diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index f223f5fa5..53322fb3a 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -17,7 +17,8 @@ class _Unset: - pass + def __repr__(self) -> str: + return "" _UNSET = _Unset() @@ -86,6 +87,11 @@ def __str__(self) -> str: """Get a string representation of the fidelity.""" return f"Fidelity({self._domain.__str__()})" + def __eq__(self, other: Fidelity | object) -> bool: + if not isinstance(other, Fidelity): + raise ValueError("__eq__ only available to compare to Fidelity objects.") + return self._domain == other._domain + @property def min_value(self) -> int | float: """Get the minimum value of the fidelity domain. @@ -159,12 +165,13 @@ def get_attrs(self) -> Mapping[str, Any]: attrs = {} for attr_name, attr_value in vars(self.__class__).items(): - if attr_name.startswith("_") or callable(attr_value): + if attr_name.startswith("_") or callable(attr_value) or attr_value is None: continue + # Skip if this parameter has been marked as removed attrs[attr_name] = attr_value for attr_name, attr_value in vars(self).items(): - if attr_name.startswith("_") or callable(attr_value): + if attr_name.startswith("_") or callable(attr_value) or attr_value is None: continue attrs[attr_name] = attr_value @@ -206,6 +213,115 @@ def __str__(self) -> str: ) return f"PipelineSpace {self.__class__.__name__} with parameters:\n\t{attrs}" + def __add__( + self, + other: ( + Integer + | Float + | Categorical + | Operation + | Resampled + | Repeated + | PipelineSpace + ), + name: str | None = None, + ) -> PipelineSpace: + """Add a new parameter to the pipeline. 
+ + Args: + other: The parameter to be added, which can be an Integer, Float, + Categorical, Operation, Resampled, Repeated, or PipelineSpace. + name: The name of the parameter to be added. If None, a default name will be + generated. + + Returns: + A new PipelineSpace instance with the added parameter. + + Raises: + ValueError: If the parameter is not of a supported type or if a parameter + with the same name already exists in the pipeline. + """ + if isinstance(other, PipelineSpace): + new_space = self + for exist_name, value in other.get_attrs().items(): + new_space = new_space.__add__(value, exist_name) + return new_space + + if not isinstance( + other, Integer | Float | Categorical | Operation | Resampled | Repeated + ): + raise ValueError( + "Can only add Integer, Float, Categorical, Operation, Resampled," + f" Repeated or PipelineSpace, got {other!r}." + ) + param_name = name if name else f"param_{len(self.get_attrs()) + 1}" + + class NewSpace(PipelineSpace): + pass + + NewSpace.__name__ = self.__class__.__name__ + + new_pipeline = NewSpace() + for exist_name, value in self.get_attrs().items(): + setattr(new_pipeline, exist_name, value) + if exist_name == param_name and not value == other: + raise ValueError( + f"A different parameter with the name {param_name!r} already exists" + " in the pipeline:\n" + f" {value}\n" + f" {other}" + ) + if not hasattr(new_pipeline, param_name): + setattr(new_pipeline, param_name, other) + return new_pipeline + + def add( + self, + new_param: Integer | Float | Categorical | Operation | Resampled | Repeated, + name: str | None = None, + ) -> PipelineSpace: + """Add a new parameter to the pipeline. This is NOT an in-place operation. + + Args: + new_param: The parameter to be added, which can be an Integer, Float, + Categorical, Operation, Resampled, or Repeated domain. + name: The name of the parameter to be added. If None, a default name will + be generated. + + Returns: + A NEW PipelineSpace with the added parameter. 
+ """ + return self.__add__(new_param, name) + + def remove(self, name: str) -> PipelineSpace: + """Remove a parameter from the pipeline by its name. This is NOT an in-place + operation. + + Args: + name: The name of the parameter to be removed. + + Returns: + A NEW PipelineSpace without the removed parameter. + + Raises: + ValueError: If no parameter with the specified name exists in the pipeline. + """ + if name not in self.get_attrs(): + raise ValueError( + f"No parameter with the name {name!r} exists in the pipeline." + ) + + class NewSpace(PipelineSpace): + pass + + NewSpace.__name__ = self.__class__.__name__ + new_pipeline = NewSpace() + for exist_name, value in self.get_attrs().items(): + if exist_name != name: + setattr(new_pipeline, exist_name, value) + + return new_pipeline + class ConfidenceLevel(enum.Enum): """Enum representing confidence levels for sampling.""" @@ -462,6 +578,15 @@ def __str__(self) -> str: string += ")" return string + def __eq__(self, other: Categorical | object) -> bool: + if not isinstance(other, Categorical): + raise ValueError("__eq__ only available to compare to Categorical objects.") + return ( + self.prior == other.prior + and self.prior_confidence == other.prior_confidence + and self.choices == other.choices + ) + @property def min_value(self) -> int: """Get the minimum value of the categorical domain. @@ -648,6 +773,17 @@ def __str__(self) -> str: string += ")" return string + def __eq__(self, other: Float | object) -> bool: + if not isinstance(other, Float): + raise ValueError("__eq__ only available to compare to Float objects.") + return ( + self._prior == other._prior + and self._prior_confidence == other._prior_confidence + and self.min_value == other.min_value + and self.max_value == other.max_value + and self._log == other._log + ) + @property def min_value(self) -> float: """Get the minimum value of the floating-point domain. 
@@ -832,6 +968,17 @@ def __str__(self) -> str: string += ")" return string + def __eq__(self, other: Integer | object) -> bool: + if not isinstance(other, Integer): + raise ValueError("__eq__ only available to compare to Integer objects.") + return ( + self._prior == other._prior + and self._prior_confidence == other._prior_confidence + and self.min_value == other.min_value + and self.max_value == other.max_value + and self._log == other._log + ) + @property def min_value(self) -> int: """Get the minimum value of the integer domain. @@ -1008,6 +1155,15 @@ def __str__(self) -> str: f" kwargs={self._kwargs!s})" ) + def __eq__(self, other: Operation | object) -> bool: + if not isinstance(other, Operation): + raise ValueError("__eq__ only available to compare to Operation objects.") + return ( + self.operator == other.operator + and self.args == other.args + and self.kwargs == other.kwargs + ) + @property def operator(self) -> Callable | str: """Get the operator of the operation. From 0ab0e0992799fe3f7f1a8e2defb0f7d48b05d3f7 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 23 Sep 2025 18:06:09 +0200 Subject: [PATCH 073/156] feat: Add methods for adding and removing parameters in NePS Spaces --- docs/reference/neps_spaces.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index faf753ecc..5b0f2cf31 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -57,6 +57,20 @@ cat_with_prior = neps.Categorical(choices=("A", "B", "C"), prior=0, prior_confid For more details on how to use priors, see the [Priors](../reference/search_algorithms/landing_page_algo.md#what-are-priors) section. +!!! info "Adding and removing parameters from **NePS Spaces**" + + To add or remove parameters from a `PipelineSpace` after its definition, you can use the `+` operator or the `add()` and `remove()` methods. 
Mind you, these methods do NOT modify the existing space in-place, but return a new instance with the modifications: + + ```python + space = MySpace() + # Adding a new parameter, this will appear as param_n where n is the next available index + space = space + neps.Float(min_value=0.01, max_value=0.1) + # Or using the add() method, this allows you to specify a name + space = space.add(neps.Integer(min_value=5, max_value=15), name="new_int_param") + # Removing a parameter by its name + space = space.remove("cat_param") + ``` + ## 3. Constructing Architecture Spaces Additionally, **NePS spaces** can describe **complex (hierarchical) architectures** using: From 6e3faa9422c6f9c1297c6953845f110828f4f9d2 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 25 Sep 2025 14:19:26 +0200 Subject: [PATCH 074/156] Started HyperBand --- neps/optimizers/algorithms.py | 15 ++- neps/optimizers/neps_bracket_optimizer.py | 16 ++- neps_examples/basic_usage/priors_test.ipynb | 142 +++++++++++++------- 3 files changed, 119 insertions(+), 54 deletions(-) diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 2e766cb9c..f10cb878e 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -55,6 +55,11 @@ PipelineSpace, Resolvable, ) +from neps.space.neps_spaces.sampling import ( + DomainSampler, + PriorOrFallbackSampler, + RandomSampler, +) from neps.space.parsing import convert_mapping if TYPE_CHECKING: @@ -1528,7 +1533,7 @@ def _neps_bracket_optimizer( *, bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], eta: int, - sampler: Literal["priorband"], + sampler: Literal["priorband", "uniform", "prior"], sample_prior_first: bool | Literal["highest_fidelity"], early_stopping_rate: int | None, inc_ratio: float = 0.9, @@ -1605,7 +1610,7 @@ def _neps_bracket_optimizer( case _: raise ValueError(f"Unknown bracket type: {bracket_type}") - _sampler: NePSPriorBandSampler + _sampler: NePSPriorBandSampler | DomainSampler match 
sampler: case "priorband": _sampler = NePSPriorBandSampler( @@ -1617,6 +1622,12 @@ def _neps_bracket_optimizer( fid_bounds=(fidelity_obj.min_value, fidelity_obj.max_value), inc_ratio=inc_ratio, ) + case "uniform": + _sampler = RandomSampler({}) + case "prior": + _sampler = PriorOrFallbackSampler( + fallback_sampler=RandomSampler({}), always_use_prior=False + ) case _: raise ValueError(f"Unknown sampler: {sampler}") diff --git a/neps/optimizers/neps_bracket_optimizer.py b/neps/optimizers/neps_bracket_optimizer.py index 3b34da4a1..e184143b4 100644 --- a/neps/optimizers/neps_bracket_optimizer.py +++ b/neps/optimizers/neps_bracket_optimizer.py @@ -15,17 +15,18 @@ import pandas as pd import neps.optimizers.bracket_optimizer as standard_bracket_optimizer +from neps.optimizers.neps_priorband import NePSPriorBandSampler from neps.optimizers.optimizer import SampledConfig from neps.optimizers.utils.brackets import PromoteAction, SampleAction from neps.space.neps_spaces import neps_space from neps.space.neps_spaces.sampling import ( + DomainSampler, OnlyPredefinedValuesSampler, PriorOrFallbackSampler, RandomSampler, ) if TYPE_CHECKING: - from neps.optimizers.neps_priorband import NePSPriorBandSampler from neps.optimizers.utils.brackets import Bracket from neps.space.neps_spaces.parameters import PipelineSpace from neps.state.optimizer import BudgetInfo @@ -58,7 +59,7 @@ class _NePSBracketOptimizer: create_brackets: Callable[[pd.DataFrame], Sequence[Bracket] | Bracket] """The sampler used to generate new trials.""" - sampler: NePSPriorBandSampler + sampler: NePSPriorBandSampler | DomainSampler def __call__( # noqa: C901 self, @@ -133,7 +134,16 @@ def __call__( # noqa: C901 # We need to sample for a new rung. 
case SampleAction(rung=rung): - config = self.sampler.sample_config(table, rung=rung) + if isinstance(self.sampler, NePSPriorBandSampler): + config = self.sampler.sample_config(table, rung=rung) + elif isinstance(self.sampler, DomainSampler): + _, resolution_context = neps_space.resolve( + self.space, domain_sampler=self.sampler + ) + config = neps_space.NepsCompatConverter.to_neps_config( + resolution_context + ) + config = dict(**config) config = self._convert_to_another_rung(config=config, rung=rung) return SampledConfig( id=f"{nxt_id}_rung_{rung}", diff --git a/neps_examples/basic_usage/priors_test.ipynb b/neps_examples/basic_usage/priors_test.ipynb index 28236d8d9..f85ebf414 100644 --- a/neps_examples/basic_usage/priors_test.ipynb +++ b/neps_examples/basic_usage/priors_test.ipynb @@ -2,78 +2,122 @@ "cells": [ { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "180fcb7f", "metadata": {}, "outputs": [ { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA/wAAAPxCAYAAABHP6YlAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAA7QdJREFUeJzs3QmczfX++PH3MBjbjKXMkCFF9i3ElMqWCcnW/d26YpSIcENRupaQSGXN0iLDRaKLImvWWw0GiSghN8pWiQkZy5z/4/25/++554zZnTPnnO95PR+PrzPnfL/zPd/v9xzz/r4/a4jD4XAIAAAAAACwlTy+PgAAAAAAAOB5JPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA2R8AMAAAAAYEMk/AAAAAAA2BAJPwAAAAAANkTCDwAAAACADZHwAwAAAABgQyT8CFovv/yyhISE5Mp7NWnSxCyWTZs2mff+6KOPcuX9u3XrJrfeeqv4s/Pnz8tTTz0lUVFR5tr0798/3W31XPScAADICLHevxDrgdxHwg9biI+PN4HDWsLCwqRMmTISGxsrU6ZMkT/++MMj73P8+HFz87B7927xN/58bFnx6quvms+xd+/e8s9//lO6dOnilfeZPn26eR/8z4cffiiPP/64VKpUyfz/cb1hBQB/Qaz372PLCmK9b/z222/y+uuvy3333Sc333yzFCtWTBo1amTiP+wvxOFwOHx9EMCN0j/qTzzxhIwaNUoqVKggV65ckZMnT5rS9XXr1km5cuXkk08+kVq1ajl/5+rVq2bRG4as2rFjhzRo0EBmz56drVLny5cvm8f8+fObRz2upk2byuLFi+WRRx7J1rnm5Nj0eqSkpEiBAgXEX2ngCQ0Nlc8//zzTbZOTkyVPnjySL1++bL9PjRo15KabbjKfAf5LE/ydO3ea74/eROr/E64PAH9DrCfWZxWx3t2KFSukY8eO0rp1a/Od1M/gX//6l2zcuFGGDx8uI0eO9PUhwotCvblzILe1atVK6te
v73w+ZMgQ2bBhgzz00EPy8MMPy7fffisFCxY06/SPnS7edPHiRSlUqJAz+PtKToJlbjt9+rRUq1YtS9v6883Mjbpw4YIULlw4V99Ta1luueUWc2OlN0kA4M+I9Wkj1geO3I711atXl4MHD0r58uWdrz3zzDPSokULee2112Tw4MG5fu+B3EOTfthes2bNZNiwYfLjjz/KvHnzMuzXpzUEjRs3Nk2dihQpIpUrV5aXXnrJrNNSYi1VV1rDYDUptJqMaS2pJktaU6pNpjT4W7+bul+f5dq1a2Yb7cumf2j1RuXYsWNZ6sPmus/Mji2tfn0abJ577jmJjo42QVXP9Y033pDUjX50P3379pVly5aZ89NtNXCsXr06y8G9e/fuEhkZaWpYateuLXPmzLmuj+ORI0fk008/dR77f/7zn3T3mfqaWM08v/jiCxk4cKBprqbXs0OHDvLLL7+4/d6+fftk8+bNzvfJTvN161rMnz/fXC89n3r16smWLVvcttPvmgZS3UZvOkuWLCl/+ctfrjsn67j1eHT7UqVKSdmyZXO0D60t+fvf/+5sqvf000+b2qazZ89K165dpXjx4mbRoJ76M9bvgCb7ABCoiPXEetffI9a7x3ptEeOa7Fvn2b59e9OS4ocffsjy9UHgoYYfQUH7iGmwXbt2rfTo0SPNbTQ4aO2ANgXU5oIa7A4dOmQCi6patap5XZs+9ezZU+69917z+t133+3WR0prHh599FHTJ1oDX0bGjBlj/uC+8MILJlhOmjTJlLZqs2qrdiIrsnJsrjQI6A2HNuXSAF2nTh1Zs2aNDBo0SH7++WeZOHGi2/YaYJYsWWKCUtGiRU1fyU6dOsnRo0dNcErPn3/+aYKsXkcNnhpwtGmjBnANTs8++6w5dq1hHjBggAmAemOiNJhlV79+/UygGzFihAmWej31fa0+avpct9EbvH/84x/mtcw+o9Q0YOv+NODqd0T7CT744IOyfft2Z+14YmKifPnll+Z7oOekxzJjxgxzLfbv329uEF3pddXz1c9Pb85ysg89L72Z1GZ5W7dulXfeecfcDOg+tJmr9ptcuXKl6cOnx6k3BgBgJ8R6d8R6Yn1msV67xCjt/gAb0z78QKCbPXu2FmM6EhMT090mIiLCUbduXefzESNGmN+xTJw40Tz/5Zdf0t2H7l+30fdL7f777zfrZs6cmeY6XSwbN240295yyy2OpKQk5+uLFi0yr0+ePNn5Wvny5R1xcXGZ7jOjY9Pf1/1Yli1bZrZ95ZVX3LZ75JFHHCEhIY5Dhw45X9Pt8ufP7/ba119/bV6fOnWqIyOTJk0y282bN8/52uXLlx0xMTGOIkWKuJ27Hl+bNm0y3F9618T6/Fu0aOFISUlxvj5gwABH3rx5HWfPnnW+Vr16dbfrlh36Hrrs2LHD+dqPP/7oCAsLc3To0MH52sWLF6/73YSEBPO7c+fOve64Gzdu7Lh69arb9tndR2xsrNu56zXWz7JXr17O1/Q9ypYtm+H538j1AQBvItYT64n1non16rfffnOUKlXKce+992ZyRRDoaMOJoKElvRmN4KslpOrjjz82g97khJYCazO7rNKSVy1Ft+igPqVLlzals96k+8+bN68puXalJe4a61atWuX2utZE3H777c7nWjMSHh6eaRMwfR8tiX7sscfc+hjq++rUPFqC7kla4+HadFNrP7QppTaZ85SYmBjTtM+iJert2rUztSb6Xsq1xkYHUdLaoIoVK5rv2K5du67bp9ZE6efhKrv70Nob13Nv2LCh+Sz1dYu+h/Z7pekeALsi1v8PsT7n7B7r9bvfuXNn0wJj6tSpWbwqCFQk/AgaGnRcA25qf/3rX+Wee+4x88Nq0y9tXrVo0aJs3RDowGfZGbRHp0FzpX/E9Q99Rn3aPEGDok5llPp6aJM7a70rDXSpaXO633//PdP30XNM3T88vfe5UamPU49RZXac2ZH6M1N33HGHGbTJ6kOozRu1yZ7VZ1K
bymkzPg2s586du+73tfljatndR+pzj4iIMI/6+6lf9+T1AAB/Qqz/H2J9ztk91mvXAB2f4b333jPjLcDe6MOPoPDTTz+ZP5waYNOjpaw6IIv2ddMBZfQPofbf0oGAtD9g6lLZ9PbhaakHG7JoCXNWjskT0nsff5vV01+OUwOpTpnUv39/U0uggVc/R72xTOumMq3vTXb3kd65p/W6v31uAOAJxHp7xNBAOc5AjfXa/1/HJBg3bpwZ9wL2R8KPoKADxajY2NgMt9PS6ebNm5tlwoQJZvATHfBFbwy0qVt6ATmndIqU1H+cddAb1zmEteRaS3pT0xLz2267zfk8O8emI7V+9tlnptmja8n/d99951zvCbqfPXv2mKDlWvLv6ffJjhv9DFN/Zur77783A+tYgw999NFHEhcXJ2+++aZzm0uXLqX5OabHE/sAgGBCrHdHrM85u8b6adOmmZkrtIBBB5FEcKBJP2xP5+YdPXq0aUql/ZXSc+bMmete0xFtlU5Zoqw5Sj31h3ju3LlufQ31D/+JEyfM6L8W7U+nI7HqtCuWFStWXDelT3aOrXXr1qbW4K233nJ7XUfs1SDp+v43Qt9HR4C1Rs5VV69eNf3FtJ/l/fffL7lNr9ONfH4JCQlu/er0c9C+oC1btnSWsOtj6pJ1PWer319WeGIfABAsiPXXI9YT611Zsw7o/w8t6ELwoIYftqID0GiJsgaaU6dOmRsAnW9XS5c/+eQTM5dqenSqG23m16ZNG7O9Tp2jTZ50mhSdr9cKyDqQysyZM01puQYUHTAlrX5ZWVGiRAmzbx38R49Xp5LRpoiu0wlpP0O9OdDpYP7v//5PDh8+bOYYdh1YJ7vH1rZtW2natKmp0dA+hNp/S5syajDTUt/U+76RgXXefvttMzWPzlmsc+Pquej0R3quGfWz9BYdhEenvHnllVfMtdb5cLUpZ1bpNDdae+Q6VY/VRM6iUz5pTZM2zatWrZq5cdBaloymNUrNE/vIKv3eW/MLa99EnS5Ir4/SeaZ1AQB/Qawn1meGWO9OpxPUwSN1v9qyZf78+W7rdWpH15YksBcSftiKDnyidDAdDbA1a9Y0wUaDbGYBR+eq1YD4/vvvy6+//moGTtFSaf3jbg2KoqPOzpkzR4YMGSK9evUyNxva9yqnNwE6X7A2gxs7dqwp/dc/whpUXOdd1YCjTb20NFYDtI68qqX+1hy2luwcmza505sivV5a4qvbaYDWeVtT7/dGaH+1TZs2yYsvvmiOLSkpSSpXrmzeT28MfEHPWZtIjh8/3lxz/YyzcxOg22s/O/1e6NzEGqDj4+PdmmZOnjzZlNprQNWmeTpAlAbwzJqZuvLEPrJKb5Zdb2LUsGHDzKPOc0zCD8CfEOuJ9Zkh1rvbv3+/aT2ihfpPPvnkdev1syLht68QnZvP1wcBAIFAm0D26dPnuuaRAADAHoj1sBv68AMAAAAAYEM06QcQ9HSwocyaK1pNPQEAQOAh1iNYkfADCHqlS5fOcL1OmaN99wAAQGAi1iNYkfADCHo6unNGypQpYx4Z8gQAgMBErEewYtA+AAAAAABsiEH7AAAAAACwIZr0Z0FKSoocP37czO2qU3UAAOBr2kBP55fWZqg63zZuHPEeAGC3WE/CnwUa/KOjo319GAAAXOfYsWNStmxZXx+GLRDvAQB2i/Uk/FmgJf3WhQ4PD/f14QAAIElJSSY5tWIUbhzxHgBgt1hPwp8FVrM+Df7cAAAA/AlNzz2HeA8AsFusp9MfAAAAAAA2RMIPAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA2R8AMAAAAAYEMk/AAAAAAA2BAJPwAAAAAANkTCDwAAAACADYX6+gAA+Kfu8YnprpvVrUGOfzcrvw8AAPzbjcb6G7nPAJB1JPwAAAAAsp3UA/B/NOkHAAAAAMCGqOEHEFDoLgAAAABkDQk/AFuhQAAAgP8iJgKgST8AAAA
AADZEDT+AoMKowIB/GzdunAwZMkSeffZZmTRpknnt0qVL8txzz8nChQslOTlZYmNjZfr06RIZGen8vaNHj0rv3r1l48aNUqRIEYmLi5OxY8dKaCi3OkCgoWUC4DnU8AMAAL+QmJgob7/9ttSqVcvt9QEDBsjy5ctl8eLFsnnzZjl+/Lh07NjRuf7atWvSpk0buXz5snz55ZcyZ84ciY+Pl+HDh/vgLAAA8B8k/AAAwOfOnz8vnTt3lnfffVeKFy/ufP3cuXMya9YsmTBhgjRr1kzq1asns2fPNon91q1bzTZr166V/fv3y7x586ROnTrSqlUrGT16tEybNs0UAgAAEKxo5wbYFM3hAASSPn36mFr6Fi1ayCuvvOJ8fefOnXLlyhXzuqVKlSpSrlw5SUhIkEaNGpnHmjVrujXx12b/2sR/3759Urdu3TTfU7sH6GJJSkry2vkBAOALJPxAkMqsQAAAcov2zd+1a5dp0p/ayZMnJX/+/FKsWDG31zW513XWNq7JvrXeWpce7eM/cuRID50FAAD+hyb9AADAZ44dO2YG6Js/f76EhYXl6nvr4IDaZcBa9FgAALATv074teS9QYMGUrRoUSlVqpS0b99eDhw44LZNkyZNJCQkxG3p1auX2zY6cq82EyxUqJDZz6BBg+Tq1au5fDYAACA1bbJ/+vRpufPOO82I+rrowHxTpkwxP2tNvfbDP3v2rNvvnTp1SqKioszP+qjPU6+31qWnQIECEh4e7rYAAGAnfp3wa8DXPn06KM+6detMH76WLVvKhQsX3Lbr0aOHnDhxwrmMHz/euY6RewEA8F/NmzeXvXv3yu7du51L/fr1zQB+1s/58uWT9evXO39HC/+1MD8mJsY810fdhxYcWPS+QRP4atWq+eS8AADwB37dh3/16tVuzzVR1xp6rQ247777nK9rzX16JfjWyL2fffaZqSXQ0Xt15N4XXnhBXn75ZdMvEAAA+Ia24qtRo4bba4ULF5aSJUs6X+/evbsMHDhQSpQoYZL4fv36mSRfB+xTWhmgiX2XLl1Mob/22x86dKipNNBafAAAgpVfJ/ypaf86pQHflfb706l4NOlv27atDBs2zBQCqJyM3MuovYB3B/zL6PeZPQBAahMnTpQ8efJIp06dTHzWOD59+nTn+rx588qKFStMbNeCAC0wiIuLk1GjRvn0uAEA8LWASfhTUlKkf//+cs8997jVBPztb3+T8uXLS5kyZWTPnj2m5l6b+i1ZsiTHI/cyai8AAL6zadMmt+c6mN+0adPMkh69F1i5cmUuHB0AAIEjYBJ+bZb3zTffyOeff+72es+ePZ0/a01+6dKlTX/Aw4cPy+23357jUXu16aBrDX90dPQNHD0AAAAAALkrIBL+vn37mqZ6W7ZskbJly2a4bcOGDc3joUOHTMKvzfy3b9+erZF7tb8fff4AZBddFQAAwdRFD4D/8+tR+h0Oh0n2ly5dKhs2bJAKFSpk+js6oq/Smn7FyL0AAAAAgGAU6u/N+BcsWCAff/yxGcXX6nMfEREhBQsWNM32dX3r1q3NaL7ah3/AgAFmBP9atWqZbRm5FwAAAAAQjPw64Z8xY4Z5bNKkidvrs2fPlm7dupkp9XS6vUmTJsmFCxdMP3sdwVcTegsj9wLwFJo+AgAAIJCE+nuT/oxogr958+ZM98PIvbBrgkm/cAAAAAABmfADAAAASB+tzwAE7KB9AAAAAAAgZ0j4AQAAAACwIZr0AwAAAPAouhoA/oGEH/CyjAIeg+4BAAAA8Baa9AMAAAAAYEPU8AMAAACwDVpXAv9Dwg8AftCXkRsQAAAAeBoJPwD8fwwwBAAAADuhDz8AAAAAADZEwg8AAAAAgA3RpB8IYDRBBwAAAJAeEn7Ah0jYAQBARrhXuB7XBMg6mvQDAAAAAGBDJPwAAMCnZsyYIbVq1ZLw8HCzxMTEyKpVq5zrmzRpIiEhIW5Lr1693PZx9OhRadOmjRQqVEhKlSolgwY
NkqtXr/rgbAAA8B806YctMMc5AASusmXLyrhx46RSpUricDhkzpw50q5dO/nqq6+kevXqZpsePXrIqFGjnL+jib3l2rVrJtmPioqSL7/8Uk6cOCFdu3aVfPnyyauvvuqTcwIAwB+Q8APwK8HaL49CKwSztm3buj0fM2aMqfXfunWrM+HXBF8T+rSsXbtW9u/fL5999plERkZKnTp1ZPTo0fLCCy/Iyy+/LPnz58+V8wAAwN+Q8AMAAL+htfWLFy+WCxcumKb9lvnz58u8efNM0q8FBMOGDXPW8ickJEjNmjVNsm+JjY2V3r17y759+6Ru3bppvldycrJZLElJSV49NwC+RwE7gg0JPwAA8Lm9e/eaBP/SpUtSpEgRWbp0qVSrVs2s+9vf/ibly5eXMmXKyJ49e0zN/YEDB2TJkiVm/cmTJ92SfWU913XpGTt2rIwcOdKr5wUAgC+R8AMAAJ+rXLmy7N69W86dOycfffSRxMXFyebNm03S37NnT+d2WpNfunRpad68uRw+fFhuv/32HL/nkCFDZODAgW41/NHR0Td8LgAA+AsSfgAIcDRPhB1oP/uKFSuan+vVqyeJiYkyefJkefvtt6/btmHDhubx0KFDJuHXZv7bt2932+bUqVPmMb1+/6pAgQJmAQDArpiWDwAA+J2UlBS3/vWutCWA0pp+pV0BtEvA6dOnndusW7fOTPFndQsAACAYUcMPAAB8SpvWt2rVSsqVKyd//PGHLFiwQDZt2iRr1qwxzfb1eevWraVkyZKmD/+AAQPkvvvuk1q1apnfb9mypUnsu3TpIuPHjzf99ocOHSp9+vShBh8AENRI+IFMmkTTHBoAvEtr5rt27SonTpyQiIgIk8hrsv/AAw/IsWPHzHR7kyZNMiP3ax/7Tp06mYTekjdvXlmxYoUZlV9r+wsXLmzGABg1apRPzwsAAF8j4QcAAD41a9asdNdpgq+D92VGR/FfuXKlh48MAIDARh9+AAAAAABsiBp+AAhydGkBAACwJ2r4AQAAAACwIRJ+AAAAAABsiCb9AAAAgB92qwKAG0UNPwAAAAAANkQNPwDAaxgQEAAAwHdI+BEUaC4HAAAAINjQpB8AAAAAABuihh8AAgCtVAAgcPE3HICvkPADgM1xowkAABCcaNIPAAAAAIANkfADAAAAAGBDft2kf+zYsbJkyRL57rvvpGDBgnL33XfLa6+9JpUrV3Zuc+nSJXnuuedk4cKFkpycLLGxsTJ9+nSJjIx0bnP06FHp3bu3bNy4UYoUKSJxcXFm36Ghfn36CBA0lwYAAADgj/w64928ebP06dNHGjRoIFevXpWXXnpJWrZsKfv375fChQubbQYMGCCffvqpLF68WCIiIqRv377SsWNH+eKLL8z6a9euSZs2bSQqKkq+/PJLOXHihHTt2lXy5csnr776qo/PEAAAAECgVOTM6tYg144FsH3Cv3r1arfn8fHxUqpUKdm5c6fcd999cu7cOZk1a5YsWLBAmjVrZraZPXu2VK1aVbZu3SqNGjWStWvXmgKCzz77zNT616lTR0aPHi0vvPCCvPzyy5I/f/7r3ldbCuhiSUpKyoWzBQAAAAAgSPvwa4KvSpQoYR418b9y5Yq0aNHCuU2VKlWkXLlykpCQYJ7rY82aNd2a+Guzf03i9+3bl+b7aHN/bS1gLdHR0V4+MwAAAAAAgjThT0lJkf79+8s999wjNWrUMK+dPHnS1NAXK1bMbVtN7nWdtY1rsm+tt9alZciQIaZwwVqOHTvmpbMCAAAAACAIm/S70r7833zzjXz++edef68CBQqYBQAAAACAQBUQCb8OxLdixQrZsmWLlC1b1vm6DsR3+fJlOXv2rFst/6lTp8w6a5vt27e77U/XW+sQGIOkMEAKAAAAANioSb/D4TDJ/tKlS2XDhg1SoUIFt/X16tUzo+2vX7/e+dqBAwfMNHwxMTHmuT7u3btXTp8+7dxm3bp1Eh4eLtWqVcvFswEAAGm
ZMWOG1KpVy8RmXTR2r1q1ym0KXm3pV7JkSTO9bqdOnZyF9xaN/TorT6FChcwAv4MGDTIz/AAAEMz8uoZfg7uOwP/xxx9L0aJFnX3udSC9ggULmsfu3bvLwIEDzUB+epPQr18/c6OgI/QrncZPE/suXbrI+PHjzT6GDh1q9k2z/dwVqPPVB+pxA57A9ETIDdp6b9y4cVKpUiVT2D9nzhxp166dfPXVV1K9enWm4AUAwI4Jv5b4qyZNmri9rlPvdevWzfw8ceJEyZMnjynt16n0dAT+6dOnO7fNmzev6Q7Qu3dvUxBQuHBhiYuLk1GjRuXy2QAAgLS0bdvW7fmYMWPMPYBOsauFAd6YghcAgGDg1wm/lvJnJiwsTKZNm2aW9JQvX15Wrlzp4aMDAACeprX1WpN/4cIFU1Cf2RS8mvCnNwWvFvbrFLx169ZN8720okAXi07ZCwAZoeUbAo1f9+EHAADBQcfb0f752t2uV69eZvwe7ZLnrSl41dixY00XAWuJjo72yrkBAOArJPwAAMDnKleuLLt375Zt27aZmnntfqfN9L1pyJAhcu7cOedy7Ngxr74fAAC5za+b9AMAgOCgtfgVK1Z0zsKTmJgokydPlr/+9a9em4JXWxMwgC8AwM5I+AEAgN9JSUkx/etdp+DVAXrTm4JXB/rTKXh1Sj7FFLwA/K2PP/374Qsk/AAAwKe0aX2rVq3MQHx//PGHGZF/06ZNsmbNGqbgBQDgBpDwAwAAn9Ka+a5du8qJEydMgl+rVi2T7D/wwANmPVPwAgCQMyT8AADAp2bNmpXheqbgBQAgZ0j4YYs5TwEAAAAA7piWDwAAAAAAGyLhBwAAAADAhmjSDwAAANwAuh4C8FfU8AMAAAAAYEMk/AAAAAAA2BAJPwAAAAAANkQffgCAX/Z5ndWtQa4dCwAAgB1Rww8AAAAAgA2R8AMAAAAAYEMk/AAAAAAA2BB9+OGxPrf0twUAAAAA/0ENPwAAAAAANkTCDwAAAACADZHwAwAAAABgQyT8AAAAAADYEAk/AAAAAAA2xCj9yPIo/AAAAACAwEENPwAAAAAANkTCDwAAAACADZHwAwAAnxo7dqw0aNBAihYtKqVKlZL27dvLgQMH3LZp0qSJhISEuC29evVy2+bo0aPSpk0bKVSokNnPoEGD5OrVq7l8NgAABEHC/8MPP3hr1wAAwA94KtZv3rxZ+vTpI1u3bpV169bJlStXpGXLlnLhwgW37Xr06CEnTpxwLuPHj3euu3btmkn2L1++LF9++aXMmTNH4uPjZfjw4R45RgAAApHXEv6KFStK06ZNZd68eXLp0iVvvQ0AAPART8X61atXS7du3aR69epSu3Ztk6hrbf3OnTvdttOa+6ioKOcSHh7uXLd27VrZv3+/OZY6depIq1atZPTo0TJt2jRTCAAAQDDyWsK/a9cuqVWrlgwcONAE5aefflq2b9/urbcDAAC5zFux/ty5c+axRIkSbq/Pnz9fbrrpJqlRo4YMGTJELl686FyXkJAgNWvWlMjISOdrsbGxkpSUJPv27UvzfZKTk8161wUAADvxWsKvpeuTJ0+W48ePy/vvv2+a3jVu3NgE6QkTJsgvv/zirbcGAAC5wBuxPiUlRfr37y/33HOP2Y/lb3/7m6m937hxo0n2//nPf8rjjz/uXH/y5Em3ZF9Zz3VdemMHREREOJfo6OhsHy8AAEE9aF9oaKh07NhRFi9eLK+99pocOnRInn/+eRNUu3btam4OAABA4PJkrNe+/N98840sXLjQ7fWePXuaGnutxe/cubPMnTtXli5dKocPH87xcWvBgbYmsJZjx47leF8AAPijUG+/wY4dO0ypvwbuwoULmxuA7t27y08//SQjR46Udu3a0dQfAHCd7vGJGa6f1a1Brh0LcifW9+3bV1asWCFbtmyRsmXLZrhtw4YNzaMWLtx+++2mS0Hq9zh16pR51HVpKVCggFkAALArryX82pRv9uzZZlqd1q1
bm5J4fcyT57+NCipUqGAG5bn11lu9dQgAAMCLPBXrHQ6H9OvXz9TYb9q0yfxeZnbv3m0eS5cubR5jYmJkzJgxcvr0aTMln9IR/3Vgv2rVqnngbGFnFDACsCuvJfwzZsyQJ5980oy6awXj1DQgz5o1y1uHAAAI0ht0bs5zh6divTbjX7BggXz88cdStGhRZ5977VdfsGBB02xf12thQsmSJWXPnj0yYMAAue+++8yggUqn8dPEvkuXLma6Pt3H0KFDzb6pxQcABCuvJfxaql6uXDlnKb9rKb72kdN1+fPnl7i4OG8dAgAA8CJPxXotOFBNmjRxe11bD2hhgu7js88+k0mTJsmFCxfM2ACdOnUyCb0lb968pjtA7969TW2/di3Q9x01apRHzxkAgEDitYRf+9PpID1WszrLmTNnTFO9a9euZWk/2o/v9ddfN3Px6v60uV/79u2d6/VGYM6cOW6/o4P66Jy+ru+pTQWXL19ubkr0JkFHFS5SpMgNnycABLPMmsHC3jwV67WAICOa4G/evDnT/ZQvX15WrlyZpfcEgNxG1xHYapT+9IL3+fPnJSwsLMv70ZL82rVry7Rp09Ld5sEHHzQ3HNbywQcfuK3X0Xx1Dl6tibAGA9LRfgEAgO9jPQAACJAa/oEDB5rHkJAQGT58uBQqVMi5Tkv6t23bZubtzapWrVqZJSPaNy+9EXi//fZbU9ufmJgo9evXN69NnTrV9AN84403pEyZMlk+FgAA4PlYDwAAAiTh/+qrr5yl/nv37jX97iz6s9bW63Q9nqQj+mpzwuLFi0uzZs3klVdeMYP6qISEBClWrJgz2VctWrQwTfv1hqRDhw7X7S85OdkslqSkJI8eLwAAgcwXsR4AAPhBwr9x40bz+MQTT5h+8jodjjdpc/6OHTuavoI6iu9LL71kWgRooq8D+Ogovan7FoaGhkqJEiWcowCnNnbsWDNvMLKHvrwAEBxyO9YDvsY9DoBA5bVB+3Rk3dzw6KOPOn+uWbOmmZ5HBxHSWv/mzZvnaJ9DhgxxNle0avh1wCAAAJD7sR4AAPhBwq817fHx8aakX3/OyJIlS8QbbrvtNrnpppvk0KFDJuHXvv2nT5922+bq1atmBOH0+v3rmADM2QsAgH/GegAA4IOEPyIiwgzgY/3sCz/99JP89ttvUrp0afNc5+I9e/asmdavXr165rUNGzZISkqKNGzY0CfHCABAoPKHWA8AAHyQ8Ls27fNUMz+d2kdr6y1HjhyR3bt3mz74umhf+06dOpnaeu3DP3jwYKlYsaLExsaa7atWrWr6+ffo0UNmzpwpV65ckb59+5quAIzQDwCA72M9ACDzsSJmdWuQa8cC+/BaH/4///zTjN5rTdXz448/ytKlS6VatWrSsmXLLO9nx44d0rRpU+dzq299XFyczJgxQ/bs2SNz5swxtfiawOu+R48e7dYkf/78+SbJ1yb+Ojq/FhBMmTLFo+cLAPAf3DTlDk/FegAAEGAJf7t27Uzfvl69eplk/K677jJT9fz6668yYcIE6d27d5b206RJE3MzkZ41a9Zkug9tCbBgwYJsHT8AAMidWA/kBkbaBxCMvJbw79q1SyZOnGh+/uijj0yTe52391//+pcMHz6cmwAfIdgBADyFWA8AgH/L460dX7x4UYoWLWp+Xrt2rakB0Ob0jRo1Mk3+AABAYCPWAwAQpDX8OnDesmXLpEOHDqbZ/YABA8zrOkWeTuUDAIC/YgyArCHWAwAQpDX82pTv+eefl1tvvdVMf6fT41k1AHXr1vXW2wIAgFxCrAcAIEhr+B955BFp3LixnDhxQmrXru18XUfK15oAAAAQ2Ij1AAAEacKvdPAeXVzpCL4AAMAeiPUAAARhwn/hwgUZN26crF+/3vTlS0lJcVv/ww8/eOutAQDIEDOWeAaxHgCAIE34n3rqKdm8ebN06dJFSpcuLSEhId56K9u50cGiuJEFAOQGYj0AAEGa8K9atUo+/fRTuee
ee7z1FgAAwIeI9QAABOko/cWLF5cSJUp4a/cAAMDHiPUAAARpwj969GgzXc/Fixe99RYAAMCHPBXrx44dKw0aNJCiRYtKqVKlpH379nLgwAG3bS5duiR9+vSRkiVLSpEiRaRTp05y6tQpt22OHj0qbdq0kUKFCpn9DBo0SK5evXpDxwYAQCDzWpP+N998Uw4fPiyRkZFmft58+fK5rd+1a5e33hoAAOQCT8V6HQdAk3lN+jVBf+mll6Rly5ayf/9+KVy4sNlmwIABpvvA4sWLJSIiQvr27SsdO3aUL774wqy/du2aSfZ1xoAvv/zSTBXYtWtXc0yvvvqqF84eAHJXRuN0ZTbGF4KX1xJ+LZ0HAMCOuOnybKxfvXq12/P4+HhTQ79z506577775Ny5czJr1ixZsGCBNGvWzGwze/ZsqVq1qmzdulUaNWoka9euNQUEn332mSmAqFOnjmmB8MILL8jLL78s+fPn98ixAgAQSLyW8I8YMcJbuwYAAH7AW7FeE3xljQ+gif+VK1ekRYsWzm2qVKki5cqVk4SEBJPw62PNmjVNsm+JjY2V3r17y759+6Ru3brXvU9ycrJZLElJSV45HwAAbJfwq7Nnz8pHH31kmvtpPzoN3Nq8T4PxLbfc4s23BgAAucDTsT4lJUX69+9vRv6vUaOGee3kyZOmhr5YsWJu2+p76DprG9dk31pvrUtv7ICRI0dm+xjhn5iWGAByMeHfs2ePKYnXfnb/+c9/pEePHuYmYMmSJWZQnblz53rrrQEAQC7wRqzXvvzffPONfP755+JtQ4YMkYEDB7rV8EdHR3v9fQEACPhR+jWAduvWTQ4ePChhYWHO11u3bi1btmzx1tsCAIBc4ulYrwPxrVixQjZu3Chly5Z1vq4D8V2+fNm0JnClo/TrOmub1KP2W8+tbVIrUKCAhIeHuy0AANiJ1xL+xMREefrpp697XZv3pde0DgAABA5PxXqHw2GS/aVLl8qGDRukQoUKbuvr1atnRttfv3698zWdtk9bEcTExJjn+rh37145ffq0c5t169aZJL5atWo5PEMAAAKb15r0a6l5WoPffP/993LzzTd7620BAEAu8VSs12b8OgL/xx9/LEWLFnUWFmhXgYIFC5rH7t27mxYF2mVAk/h+/fqZJF8H7FM6jZ8m9l26dJHx48ebfQwdOtTsW48TAIBg5LUa/ocfflhGjRplRtVVISEhpiRep8fp1KmTt94WAADkEk/F+hkzZpiR+Zs0aSKlS5d2Lh9++KFzm4kTJ8pDDz1k9qtT9WkzfR0rwJI3b17THUAftSDg8ccfl65du5rjAwAgWIU4tB2dF2jgfuSRR0xzv/Pnz0uZMmVMabsG4ZUrV0rhwoUlUGjthdYu6DnlRv8+RpkFgMA1q1sDW8Ymu8d6f7mmyDnunxDMciv2IPDiktea9OuBad+5L774Qr7++mtzI3DnnXe6zaELAAACF7EeAAD/5pWEX+fQjY+PN03tdJoebeKnA/Bo8zttUKDPAQBA4CLWAwAQhH34Nchrn76nnnpKfv75Z6lZs6ZUr15dfvzxRzN1T4cOHTz9lgAAIBcR6wEACNIafi3t17l3deqcpk2buq3TqXbat28vc+fONQPpAACAwEOsBwAgSGv4P/jgA3nppZeuuwFQzZo1kxdffFHmz5/v6bcFAAC5hFgPAECQJvx79uyRBx98MN31rVq1MgP7AACAwESsBwAgSBP+M2fOSGRkZLrrdd3vv//u6bcFAAC5hFgPAECQ9uG/du2ahIamv9u8efPK1atXPf22AAAglxDrAcC/dI9PzHD9rG4Ncu1YYPOEX0fu1RF6CxQokOb65ORkT78lAADIRcR6AACCNOGPi4vLdBtG7QUAIHAR6+GPNZgAgFxI+GfPnu3pXQIAAD9CrAcAIEgH7QMAAAAAAL5Hwg8AAAAAgA2R8AMAAAAAYEMk/AAAAAAA2BAJPwAAAAAANkTCDwAAAACADfl9wr9lyxZ
p27atlClTRkJCQmTZsmVu6x0OhwwfPlxKly4tBQsWlBYtWsjBgwfdtjlz5ox07txZwsPDpVixYtK9e3c5f/58Lp8JAAAAAAC5x+8T/gsXLkjt2rVl2rRpaa4fP368TJkyRWbOnCnbtm2TwoULS2xsrFy6dMm5jSb7+/btk3Xr1smKFStMIULPnj1z8SwAAAAAAMhdoeLnWrVqZZa0aO3+pEmTZOjQodKuXTvz2ty5cyUyMtK0BHj00Ufl22+/ldWrV0tiYqLUr1/fbDN16lRp3bq1vPHGG6blQGrJyclmsSQlJXnt/AAAAAAACMoa/owcOXJETp48aZrxWyIiIqRhw4aSkJBgnuujNuO3kn2l2+fJk8e0CEjL2LFjzX6sJTo6OhfOBgCA4JRZ971u3bqZ112XBx980G0buu8BAGCzhF+TfaU1+q70ubVOH0uVKuW2PjQ0VEqUKOHcJrUhQ4bIuXPnnMuxY8e8dg4AAAS7zLrvKU3wT5w44Vw++OADt/V03wMAIACb9PtCgQIFzAIAAHzbfc+icTkqKirNdTnpvgcAQDAI6Bp+K/CfOnXK7XV9bq3Tx9OnT7utv3r1qmn6l96NAwAA8C+bNm0yLfYqV64svXv3lt9++825Lifd95SO16Pj9LguAADYSUAn/BUqVDBJ+/r1652vabDW4B4TE2Oe6+PZs2dl586dzm02bNggKSkppq8/AADwb9qcXwfl1Xj/2muvyebNm02LgGvXruW4+55izB4AgN35fZN+HXDn0KFDbgP17d692wTxcuXKSf/+/eWVV16RSpUqmQKAYcOGmaZ77du3N9tXrVrV3Cj06NHDTN135coV6du3rxnB35dN/LrHJ/rsvQEACCQasy01a9aUWrVqye23325q/Zs3b57j/eqYPQMHDnSrNCDpBwDYid8n/Dt27JCmTZs6n1uBOS4uTuLj42Xw4MFmsB8dmEdr8hs3bmz68YWFhTl/Z/78+SbJ15sCbd7XqVMnmTJlik/OBwBgb5kV6M7q1iDXjsWubrvtNrnppptMhYDG9px232PMHgCA3fl9wt+kSRNxOBzprtepeUaNGmWW9GhrgAULFnjpCAEAQG766aefTB/+0qVLX9d9r169euY1uu8BABAACT8AALC3jLrv6TJy5EjTOk9r6w8fPmxa91WsWFFiY2P9uvseAAC+RsIPAAD8tvvejBkzZM+ePTJnzhxTi68JfMuWLWX06NFuzfHpvmcPjHEEAJ5Fwg8AAPy6+96aNWsy3Qfd9wAAsNm0fAAAAAAAIG3U8AMAACBX0GQfAHIXNfwAAAAAANgQNfwAAAAAEMSta2Z1a5Brx4LcRQ0/AAAAAAA2RMIPAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA2R8AMAAAAAYEOhvj4AAAAAAIDvdI9PzPHvzurWwKPHAs+ihh8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4AQAAAACwIRJ+AAAAAABsiIQfAAAAAAAbIuEHAAA+tWXLFmnbtq2UKVNGQkJCZNmyZW7rHQ6HDB8+XEqXLi0FCxaUFi1ayMGDB922OXPmjHTu3FnCw8OlWLFi0r17dzl//nwunwkAAP4l1NcHAAAAgtuFCxekdu3a8uSTT0rHjh2vWz9+/HiZMmWKzJkzRypUqCDDhg2T2NhY2b9/v4SFhZltNNk/ceKErFu3Tq5cuSJPPPGE9OzZUxYsWOCDMwKA4NE9PjHD9bO6Nci1Y8H1SPgBAIBPtWrVyixp0dr9SZMmydChQ6Vdu3bmtblz50pkZKRpCfDoo4/Kt99+K6tXr5bExESpX7++2Wbq1KnSunVreeONN0zLgbQkJyebxZKUlOSV8wMAwFdI+AEAgN86cuSInDx50jTjt0REREjDhg0lISHBJPz6qM34rWRf6fZ58uSRbdu2SYcOHdLc99ixY2X
kyJG5ch7BIrOaPgBA7qIPPwAA8Fua7Cut0Xelz611+liqVCm39aGhoVKiRAnnNmkZMmSInDt3zrkcO3bMK+cAAICvUMMPAACCUoECBcwCAIBdUcMPAAD8VlRUlHk8deqU2+v63Fqnj6dPn3Zbf/XqVTNyv7UNAADBiIQfAAD4LR2VX5P29evXuw2up33zY2JizHN9PHv2rOzcudO5zYYNGyQlJcX09QcAIFjRpB8AAPjU+fPn5dChQ24D9e3evdv0wS9Xrpz0799fXnnlFalUqZJzWj4deb99+/Zm+6pVq8qDDz4oPXr0kJkzZ5pp+fr27WsG9EtvhH7kHAPzAUDgIOEHAAA+tWPHDmnatKnz+cCBA81jXFycxMfHy+DBg+XChQvSs2dPU5PfuHFjMw1fWFiY83fmz59vkvzmzZub0fk7deokU6ZM8cn5AADgL0j4AQCATzVp0kQcDke660NCQmTUqFFmSY+2BliwYIGXjhAAgMBEH34AAAAAAGyIhB8AAAAAABsK+IT/5ZdfNk39XJcqVao411+6dEn69OkjJUuWlCJFipg+famn9gEAAAAAwG4CPuFX1atXlxMnTjiXzz//3LluwIABsnz5clm8eLFs3rxZjh8/Lh07dvTp8QIAAAAA4G22GLQvNDTUzNGb2rlz52TWrFlmEJ9mzZqZ12bPnm2m79m6das0atTIB0cLAAAAAID32aKG/+DBg2ae3dtuu006d+4sR48eNa/v3LnTzMXbokUL57ba3F/n9E1ISEh3f8nJyZKUlOS2AAAAAAAQSAI+4W/YsKGZo1fn450xY4YcOXJE7r33Xvnjjz/k5MmTkj9/filWrJjb70RGRpp16Rk7dqxEREQ4l+jo6Fw4EwAAAAAAPCfgm/S3atXK+XOtWrVMAUD58uVl0aJFUrBgwRztc8iQITJw4EDnc63hJ+kHAAAAAASSgK/hT01r8++44w45dOiQ6dd/+fJlOXv2rNs2Okp/Wn3+LQUKFJDw8HC3BQAAAACAQBLwNfypnT9/Xg4fPixdunSRevXqSb58+WT9+vVmOj514MAB08c/JibG14cKAAAAAEGre3xihutndWuQa8diVwGf8D///PPStm1b04xfp9wbMWKE5M2bVx577DHT/7579+6meX6JEiVMTX2/fv1Mss8I/QAAAAAAOwv4hP+nn34yyf1vv/0mN998szRu3NhMuac/q4kTJ0qePHlMDb+Ovh8bGyvTp0/39WEDAAAAAOBVAZ/wL1y4MMP1YWFhMm3aNLMAAADgxprYAkB28DfFt2w3aB8AAAAAACDhBwAAAADAlkj4AQAAAACwIRJ+AAAAAABsiIQfAAAAAAAbIuEHAAAAAMCGSPgBAAAAALAhEn4AAOD3Xn75ZQkJCXFbqlSp4lx/6dIl6dOnj5QsWVKKFCkinTp1klOnTvn0mAEA8DUSfgAAEBCqV68uJ06ccC6ff/65c92AAQNk+fLlsnjxYtm8ebMcP35cOnbs6NPjBQDA10J9fQAAAABZERoaKlFRUde9fu7cOZk1a5YsWLBAmjVrZl6bPXu2VK1aVbZu3SqNGjVKc3/JyclmsSQlJXnx6AEAyH0k/AAAICAcPHhQypQpI2FhYRITEyNjx46VcuXKyc6dO+XKlSvSokUL57ba3F/XJSQkpJvw6++PHDlSgk33+ERfHwIAIJeQ8AMAAL/XsGFDiY+Pl8qVK5vm/Jqo33vvvfLNN9/IyZMnJX/+/FKsWDG334mMjDTr0jNkyBAZOHCgWw1/dHS0BAKSdgDBILO/dbO6Nci1YwlUJPwAAMDvtWrVyvlzrVq1TAFA+fLlZdGiRVKwYMEc7bNAgQJmAQDArhi0DwAABBytzb/jjjvk0KFDpl//5cuX5ezZs27b6Cj9afX5BwAgWJDwAwCAgHP+/Hk5fPiwlC5dWurVqyf58uWT9evXO9cfOHBAjh49avr6AwAQrGjSDwAA/N7zzz8vbdu2Nc34dcq9ESNGSN6
8eeWxxx6TiIgI6d69u+mPX6JECQkPD5d+/fqZZD+9AfsAAAgGJPwAAMDv/fTTTya5/+233+Tmm2+Wxo0bmyn39Gc1ceJEyZMnj3Tq1MlMtRcbGyvTp0/39WEDAOBTJPwAAMDvLVy4MMP1OlXftGnTzAIAAP6LhB8AAAAAYKtp+5iy779I+AEAAAJs7mkAALKChB8AAAAAEFQFp7OCpAUACT8AAAAAIKh0D5ICgTy+PgAAAAAAAOB51PADAAD4AP30AQDeRg0/AAAAAAA2RMIPAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA0xLR8AAIAXMO0eAMDXqOEHAAAAAMCGSPgBAAAAALAhmvQDAAAAAJCNblmzujWQQEDCDwAAAACAhwoE/KkwgCb9AAAAAADYEAk/AAAAAAA2RMIPAAAAAIANBVXCP23aNLn11lslLCxMGjZsKNu3b/f1IQEAAA8i1gMAEIQJ/4cffigDBw6UESNGyK5du6R27doSGxsrp0+f9vWhAQAADyDWAwAQpKP0T5gwQXr06CFPPPGEeT5z5kz59NNP5f3335cXX3zRbdvk5GSzWM6dO2cek5KSPHY8l/8877F9AQACh6diibUfh8Phkf0FW6zPjXhPrAeA4JTkT7HeEQSSk5MdefPmdSxdutTt9a5duzoefvjh67YfMWKEXlEWFhYWFha/X44dO5aLEdU+sV4R71lYWFhYxOaxPihq+H/99Ve5du2aREZGur2uz7/77rvrth8yZIhpEmhJSUmRM2fOSMmSJSUkJCRHJTPR0dFy7NgxCQ8Pz+FZBBeuWc5w3XKG65Z9XDPfXzct7f/jjz+kTJkyHju+YIr1no73/J/IGa5b9nHNcobrljNct8CP9UGR8GdXgQIFzOKqWLFiN7xf/cD5j5I9XLOc4brlDNct+7hmvr1uERERHjmeYOWNeM//iZzhumUf1yxnuG45w3UL3FgfFIP23XTTTZI3b145deqU2+v6PCoqymfHBQAAPINYDwBAkCb8+fPnl3r16sn69evdmu3p85iYGJ8eGwAAuHHEegAAgrhJv/bRi4uLk/r168tdd90lkyZNkgsXLjhH8vUmbS6oUwSlbjaI9HHNcobrljNct+zjmuUM1827iPWBh+uWfVyznOG65QzXLfCvWYiO3CdB4q233pLXX39dTp48KXXq1JEpU6ZIw4YNfX1YAADAQ4j1AAAEacIPAAAAAECwCIo+/AAAAAAABBsSfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4vWzatGly6623SlhYmBklePv27b4+JL8yduxYadCggRQtWlRKlSol7du3lwMHDrhtc+nSJenTp4+ULFlSihQpIp06dZJTp0757Jj9zbhx4yQkJET69+/vfI1rlraff/5ZHn/8cXNdChYsKDVr1pQdO3Y41+sYpsOHD5fSpUub9S1atJCDBw9KsLp27ZoMGzZMKlSoYK7H7bffLqNHjzbXycI1E9myZYu0bdtWypQpY/4vLlu2zG19Vq7RmTNnpHPnzhIeHi7FihWT7t27y/nz53P5THAjiPfpI9bfOGJ91hHrs494b/N4r6P0wzsWLlzoyJ8/v+P999937Nu3z9GjRw9HsWLFHKdOnfL1ofmN2NhYx+zZsx3ffPONY/fu3Y7WrVs7ypUr5zh//rxzm169ejmio6Md69evd+zYscPRqFEjx9133+3T4/YX27dvd9x6662OWrVqOZ599lnn61yz6505c8ZRvnx5R7du3Rzbtm1z/PDDD441a9Y4Dh065Nxm3LhxjoiICMeyZcscX3/9tePhhx92VKhQwfHnn386gtGYMWMcJUuWdKxYscJx5MgRx+LFix1FihRxTJ482bkN18zhWLlypeMf//iHY8mSJXpn5Fi6dKnb+qxcowcffNBRu3Ztx9atWx3//ve/HRUrVnQ89thjPjgb5ATxPmPE+htDrM86Yn3OEO/tHe9
J+L3orrvucvTp08f5/Nq1a44yZco4xo4d69Pj8menT582/4E2b95snp89e9aRL18+84fH8u2335ptEhISHMHsjz/+cFSqVMmxbt06x/333++8CeCape2FF15wNG7cON31KSkpjqioKMfrr7/ufE2vZYECBRwffPCBIxi1adPG8eSTT7q91rFjR0fnzp3Nz1yz66W+AcjKNdq/f7/5vcTEROc2q1atcoSEhDh+/vnnXD4D5ATxPnuI9VlHrM8eYn3OEO/tHe9p0u8lly9flp07d5qmHJY8efKY5wkJCT49Nn927tw581iiRAnzqNfwypUrbtexSpUqUq5cuaC/jtqMr02bNm7XRnHN0vbJJ59I/fr15S9/+YtpUlq3bl159913neuPHDkiJ0+edLtuERERpmlusF63u+++W9avXy/ff/+9ef7111/L559/Lq1atTLPuWaZy8o10kdt1qffT4turzFj27ZtPjluZB3xPvuI9VlHrM8eYn3OEO/tHe9DvbbnIPfrr7+a/jCRkZFur+vz7777zmfH5c9SUlJM37R77rlHatSoYV7T/zj58+c3/zlSX0ddF6wWLlwou3btksTExOvWcc3S9sMPP8iMGTNk4MCB8tJLL5lr9/e//91cq7i4OOe1Sev/bLBetxdffFGSkpLMTWTevHnN37QxY8aYvmeKa5a5rFwjfdQbU1ehoaEmGeI6+j/iffYQ67OOWJ99xPqcId7bO96T8MOvSrG/+eYbU6KI9B07dkyeffZZWbdunRkcClm/ydQS1VdffdU811J//b7NnDnT3ATgeosWLZL58+fLggULpHr16rJ7925zo66D1XDNAOQEsT5riPU5Q6zPGeK9vdGk30tuuukmU0KWerRUfR4VFeWz4/JXffv2lRUrVsjGjRulbNmyztf1WmlzybNnz7ptH8zXUZvxnT59Wu68805TKqjL5s2bZcqUKeZnLUnkml1PR0ytVq2a22tVq1aVo0ePmp+ta8P/2f8ZNGiQKfV/9NFHzSjHXbp0kQEDBpgRtxXXLHNZuUb6qP+nXV29etWM5Mt19H/E+6wj1mcdsT5niPU5Q7y3d7wn4fcSbTpUr1490x/GtdRRn8fExPj02PyJjnmhNwBLly6VDRs2mOlAXOk1zJcvn9t11Kl89A93sF7H5s2by969e03pq7VoabY2u7J+5ppdT5uPpp4GSvuqlS9f3vys3z39Y+t63bR5m/apCtbrdvHiRdOvzJUmNvq3THHNMpeVa6SPetOuN/gW/Xuo11n7/sG/Ee8zR6zPPmJ9zhDrc4Z4b/N477XhAGGm6dGRGePj482ojD179jTT9Jw8edLXh+Y3evfubaav2LRpk+PEiRPO5eLFi27Tzuj0PRs2bDDTzsTExJgF/+M6cq/imqU9rVFoaKiZeubgwYOO+fPnOwoVKuSYN2+e23Qq+n/0448/duzZs8fRrl27oJtyxlVcXJzjlltucU7To9PQ3HTTTY7Bgwc7t+Ga/XcU7a+++sosGlYnTJhgfv7xxx+zfI10mp66deuaaaQ+//xzMyo30/IFDuJ9xoj1nkGszxyxPmeI9/aO9yT8XjZ16lTzx1jn59Vpe3TORfyP/mdJa9H5ei36n+SZZ55xFC9e3PzR7tChg7lRQPo3AVyztC1fvtxRo0YNc2NepUoVxzvvvOO2XqdUGTZsmCMyMtJs07x5c8eBAwccwSopKcl8r/RvWFhYmOO2224z888mJyc7t+GaORwbN25M8++Y3kBl9Rr99ttvJuDrvMfh4eGOJ554wtxYIHAQ79NHrPcMYn3WEOuzj3hv73gfov94r/0AAAAAAADwBfrwAwAAAABgQyT8AAAAAADYEAk/AAAAAAA2RMIPAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA2R8AM21aRJE+nfv7+vDwMAAHgR8R5ARkj4AZtasmSJjB49Okvb/uc//5GQkBDZvXu32N3f//5
3qVevnhQoUEDq1Knj68MBAOCGEO+v9/XXX8tjjz0m0dHRUrBgQalatapMnjzZ14cF+AQJP4LWyy+/bIJebpW+62LZtGmTee+PPvrIa+9ZokQJKVq0qPm5W7ducuutt4o/O3/+vDz11FMSFRVlrk1GtRV6LnpOOfXkk0/KX//61xz/PgAgMNg91rvGe2L9/+zcuVNKlSol8+bNk3379sk//vEPGTJkiLz11ls3cPRAYCLhhy3Ex8ebwGEtYWFhUqZMGYmNjZUpU6bIH3/84ZH3OX78uLl58MeS8dTH5trET282zp07ZxJdvSkoV66cvPPOO87frVChgnmsW7euuX6uNyzp0SDcvn17GTlypNx8880SHh4uvXr1ksuXLzu3Wb16tTRu3FiKFSsmJUuWlIceekgOHz58XU3Dhx9+KNWqVZNZs2ZJo0aNZPr06fLtt9/KLbfcIoUKFZKaNWvKBx984Pb+uu9+/fqZcyxevLhERkbKu+++KxcuXJAnnnjCnGfFihVl1apVbr9XpUoVKVy4sNx22205vtZ2M2DAALnzzjvNTaNeb60J0e+S3pgBgL8g1qd9bK7x/qeffpJXX33Vb+P9448/bmL9r7/+Kj169JC2bduamviM4r0eY3bj/aVLl8w53n///Sbe6/vqttoaAmI+G/3/o5/Jjh07fH048DISftjKqFGj5J///KfMmDHDBAelAUIDyJ49e9y2HTp0qPz555/ZDrQa8LJ7E7B27VqzeFNGx6bBV/+o169fX7766it55plnpHfv3nLgwAGzfvv27ebxs88+kxMnTmQ5IK5fv94k5lqLoQFaf0+PwaLBeODAgSaY6LZ58uSRDh06SEpKitt+XnzxRRN4tKm9fnYPP/ywPPDAA/Lpp5/KN998Iz179pQuXbo4j1OP+4477pA5c+bITTfdZF7Xz1vP6S9/+YvcfffdsmvXLmnZsqX5vYsXLzrfSwsT9KYR/5OYmCj33nuv+ey0yWPTpk1l3Lhx8uCDD173WQGArxHr0z42TYI1cX7zzTf9Nt5rUq4F7wcPHjS/rz9r7E8r3usx6zmp7Mb7tGK9VnxowTb+W9AfGhrq68NAbnEANjB79myHfp0TExOvW7d+/XpHwYIFHeXLl3dcvHjxht5H96/vo++XFRcuXEjz9Y0bN5r9LF68+IaOJ6Nju//++x3PPvus+VnP/fHHH3dum5KS4ihVqpRjxowZ5vmRI0fM73711VdZfr+4uDhHiRIl3M5R91ekSBHHtWvX0vydX375xbzP3r173d530qRJjgoVKjjatGmT7vvpuueee875XM+vcePGzudXr151FC5c2NGlSxfnaydOnDD7T0hIcL5WvXp187sjRoxw1K5d2+Fvzp8/7/AHb7zxxnXXDgB8iVif9rEFUrwvWbJkhrHeU/HeivWWL774whEaGupYs2aNI9hj/erVqx358+d3DB06NN3/T7AXavhhe82aNZNhw4bJjz/+aPpyZdSvb926dc4maUWKFJHKlSvLSy+9ZNZpqXaDBg3Mz9oszGpSaJUga5OzGjVqmH5j9913n2maZv1u6n59lmvXrplttC+bNjPXmu1jx45lqQ+b6z7TOrbNmzfL/v37zWvadE5Lzy26Xvu2zZ071wxoo+ep9LnDoX//xW3bvn37yrJly8z56WB31atXl59//llq165tztMSExNjmoFb56Al+FrCr83ttLRfmwKq999/3+09tGbmyJEj5hit66o1BVpbo6Xx+lmsWbNGjh496rwm3333ndSqVcs8189AS6p1O202qO+j11NrANTp06edv6d9+fTaaM2CDuqTleaMqa/F/PnzzTWzWiVs2bLFbTv9rmmtim6jgwVpCwutidBjS6t5qh6Pbq+fSdmyZXO0j88//9wMSKjnrt/fp59+2jS3PHv2rHTt2tU0g9Rl8ODB133GabH6gervA4C/C9ZYr8vJkyfNa/r72qTfio1Ka7y1lv2FF14w8Vuvk8pqrNe
m+iqzeL9161bTdD5v3rxmX9rsXllxW9er3377zS3Wa9NyHXAwrXjvek30nKx4p/vSewptZaDXU+8z9LkV711jvfU+2rR/xIgRpiVAMMf6K1euyLPPPmuW22+/PdNrAXugLQeCgjbz0mCrTe20z1haNDhonzMNKtpcUIPdoUOH5IsvvjDrtV+zvj58+HDT5EybQCttTmbRQNaqVSt59NFHTX8xK+ClZ8yYMeYPuAZiDVKTJk2SFi1amKZ6+oc/q9I6Nt23Nu1LiwYBDSbW4Dma9GsgnDhxolmnj640wGhg1aCkybv2ldQbD+1vn5E2bdqY5ofJyckmEGlfy7Fjx5r9ly9fXtq1a2e2e+WVV8y5awB87rnnZPny5ab5njYv15sADehaKODaX1Dly5fP7fnvv/9ubnz0XPT8dJ/KalKoz7UpoN5Q6DXTGzYdyCc7NGDrmAMacPU7os0Gtem7NjPUmySrifyXX35pvgd6Tnos2vRUb9q0EMb1pknpddXgrZ+fNovMyT70vPRmUgsy9GZI+2zqzYDuQ/twap/OlStXyuuvv26OUz8PV1evXjU3DHqNtVmlNoPVz/quu+7K1vUBAF8Jxlivpk6dmmZs1HiuhQta6K/92bt37y5Lly41Beza7N+1YCS9WN+pUyfTzz4j2mVCE2pNJjt27GiSV03aNcb+61//ktatWzuTS41LmpBrrFeaVOv76DXJarzXeKfvpfvVMQn0d/Pnz++M966xXgsMNP7p/YjGtWCP9Xpt9F5JrwXjGQQRXzcxALzdzM8SERHhqFu3rvO5Nul2/S8wceJE81yboeWkmZ82HdN1M2fOTHOda9Myq5nfLbfc4khKSnK+vmjRIvP65MmTna9p8zxtTpfZPjNq0q9N34oXL+7cdtmyZWbbpk2bmuc///yzed68eXNHSEiI49ChQ85t9XVt+uX62tdff21eL1SokFvTST13q4nfr7/+arbRZd68eWb9v//9b/O8cuXKZjtt6mc1LdTztJr5PfTQQ44nn3zSuV/dX6VKlRzt2rVzXpPIyEjn+Vmff1hYmGPChAnO3xswYIDb+99ok37rfHbs2OF87ccffzTv26FDB+draTUn1WaG+rtz5851vmYdtzZV1CaKrrK7j9jYWNN00xITE2M+y169ejlf0/coW7as2/cm9b6tRT8j/Z4CgL8g1mfcpF9/P2/evOYcXWN9VFSUuQ6u8T47sf6uu+4yTfrTi/djxowx22kT8dTnrvFRz91q0l+6dGm3Jv0ZxXvrmljnZ33+LVq0cJQrV855nhrr9bx13dKlS52xvn79+qY7w6BBgxzZYddYr90eihYt6nj77bfd9kmTfvujST+Chpb0ZjSCr5aQqo8//jjHA5VpKbA2s8sqLXm1ps5TjzzyiJQuXdqUznqTtf+GDRuaR21eprUMOsqtxrrUpb5aE+Ha9EtrRrS0XUvYtcZAS6F1n1qzrs3gtGmdNinTbbQpnDZB3LBhg2mmr7SkXVsXbNu2Lc3jq1SpkmlyqSXWOkiQNlk7depUlj5j16abVu3HL7/84nxNaw30vbUlgNZKaA2LLqlrE9KjzRi1aZ9FS9S1pYLWZmizTeVaY6PXSGuD9Nrqd0wHF0pNa6K0GaSr7O5DPwfXc9fPVj9Lfd2i76EDOf3www/X/b7OkqDXXJtzalNArWVhlH4AgYZY/z+6f/27r4PdWTTe6/FrfNAabB3ILqNYryPyayzQGJlevLemH9SWA9paQuO9xhFrtHytLU9PTuK9tm5IHeut+GvRloXabU+b8Ou9h8Z8XVzvB4It1msLE+12oS07EVxI+BE0NGC5BtzUdF72e+65x/wh1OZ52rxq0aJF2boh0Cb0VrOyrNBA50r/iOsf+tR9tzxN+4xpMq5BX2n/d21St2LFCvNcf3algS41PU9tVqbnoP0Y9fpp00HtL6n0JsCaZ1dvGnREWG1ipqy+azoOQFq0qZlOE6dTLWmzNt2PTgmUmdQjzmq
hg3JNXPU9tZnh22+/Ld9//71p5qiLdj3IyWemdMYA7Sdp3UhoQYI22dOuEnqN9WZLm/Fpk3nXm6vU0yS5yu4+Un9GERER5lF/P/Xr2pwvNb2p05s9vaF57bXXTHNL/VlvmAAgUBDr3WO9dqVzTTI1To4fP978rF3arK516cV6jaOaPDdv3jzdeK/97bX5uCao+uga763jSE9O4n3q47RivSuNk5pAa7cFLVyxFmsMhGCL9dr8X2e20C6V1ngHCB704UdQ0EFs9A+nBtj0aCmrDsiyceNGM6CMDlSjpd86wI32B0xdKpvePjwt9WBDFi1hzuiYtMTdtTbB9bnSoGoFa6U3PzpQjJYq6w2Qq4zeR/uRuU7N40r7nmkps+tAN1oSbSWROjhNWoPK6MA9WtOcEe1LZ/XRt2jff32/1HRwJteAqwE19fXwJO1jN3v2bNMPUWsJNPDq56jXNa2byrS+N9ndR3qfUVqvZ2XQPu2Hqf1hFy5caAZrAgB/F4yxXlnxTPura4G6xg1Xqafwi4uLM4O2Pf/8826JeWbvk1G813uH1IPa6fvWqVPH/Kz99jX2WAPCZiXeW9ta52cNnKjHmVZhiX6m1gCH2pJBW64R6/9LW1xoSwi9B7KunY7toHR6Ri20SavAB/ZAwo+goKWaSkuQM6KlnlqKrcuECRPM4CdaAq5BRGs/0wvIOaWj2Kf+46zN4VxH2NWS67RGStcSc22aZcnOsemAeToHrzZ7dK0J0ZHvrfWeoPvROZE1aLmWKHv6fbLjRj/D1J+Z0pYCWrhhzULw0UcfmRsqHRTJos0aszPivSf2cSO0Rkc/t7RqGADAHxHr3RHrc85usV4Tev0updXKQFtraEEDs/LYF206YHval0ynfNE/cp07d053uzNnzlz3mlUyrcmP0n7NylN/FHUaPNe+hvqHX0tadfRfi/an06ZYrn3Mtel96il9snNsOrKt1hq89dZbbq9rUy8Nkvr+2g9SF6XN363nuvz73//O0vnp+2ifOa09cR0NXkcU1v3oqL65Ta/TjXx+CQkJbv3q9HPQvqDaT9AqYdfH1LXoes6p+xhmxBP7yAq9FtrsMbX33nvPPKbVYgIA/A2xPmexXqUX63Wxrklm70Os9+9Yr6P56wwNrou2LlBvvPGGmS0B9kUNP2xl1apVpkRZA40O+qI3ADoYjJYuf/LJJ2YAufToVDfaHE0HlNPtdeocnYZFm8dZTcI1IGuztZkzZ5rScg0oOmBKWiWmWaFN2XTfOviPHq82UdemiK7TCWlTe7050Cbs//d//2fmrNU+aannT83OsekUO02bNjU1Gtq0S5tsa1NGDWbarEz3ZTUB1H5s2rRMB+hx7b+ozeIzm8NeB9bRGwhtZqj95rV5np6LTn+k55pRP0tv0UF4dMobnQpQr7U2+7PmJc4K7Z+otUeuU/Uo12aOOuWT1jRpibk2KdQbB61l0S4MWeWJfWSFNnfUc9FuH/pZ682mFujowI2a7OuUUwDgT4j1nov1SuN9WrFeaSsIPXarOX1aiPX+H+u1oCI1q0BEC2Qo3Lc3En7Yig58onQwHQ2wOqerBhsNspkFHG3SpAHx/fffN/2aNKHVP4L6x90aFEUHutP54YcMGSK9evUyNxva9yqnNwE6X7A2g9O56bX0XwOrBhXXeVc14GhTL212qAFa/yhrqb81h60lO8emTe70pkivl5bI63YaoLUvn7Vf1z6Qev4Z9YlMj/ZX04TyxRdfNMeWlJRk5ufV99MbA1/Qc9ZmbTpokV5z/YyzcxOg22s/O/1eaBM5DdB6I+TaNHPy5Mmm1F5LzLVpng4QpQE8s2amrjyxj6zQ/yN6Q6g3gFrjpDUNehOo12nQoEHZGpgKAHIDsd5zsV5Z8T2tWJ+VpvHEev+P9QhuITo3n68PAgACgd749OnT57rmkQAAwB6I9bAb+vADAAAAAGBDNOkHEPR0sKHMmitaTT0BAEDgIdYjWJHwAwh6pUuXznC
9TpmT0YBFAADAvxHrEaxI+AEEPR3dOSNlypQxjwx5AgBAYCLWI1gxaB8AAAAAADbEoH0AAAAAANgQTfqzICUlRY4fP27mds3KfKQAAHibNtDT+aW1GarOt40bR7wHANgt1pPwZ4EG/+joaF8fBgAA1zl27JiULVvW14dhC8R7AIDdYj0JfxZoSb91ocPDw319OAAASFJSkklOrRiFG0e8BwDYLdaT8GeB1axPgz83AAAAf0LTc88h3gMA7Bbr6fQHAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA2R8AMAAAAAYEMk/AAAAAAA2BAJPwAAAAAANkTCDwAAAACADZHwAwAAAABgQ6G+PgAAwad7fGK662Z1a5CrxwIAgDfjmiK2AfAVv6/h//nnn+Xxxx+XkiVLSsGCBaVmzZqyY8cO53qHwyHDhw+X0qVLm/UtWrSQgwcPuu3jzJkz0rlzZwkPD5dixYpJ9+7d5fz58z44GwAAAAAAcodfJ/y///673HPPPZIvXz5ZtWqV7N+/X958800pXry4c5vx48fLlClTZObMmbJt2zYpXLiwxMbGyqVLl5zbaLK/b98+WbdunaxYsUK2bNkiPXv29NFZAQAAAAAQ5E36X3vtNYmOjpbZs2c7X6tQoYJb7f6kSZNk6NCh0q5dO/Pa3LlzJTIyUpYtWyaPPvqofPvtt7J69WpJTEyU+vXrm22mTp0qrVu3ljfeeEPKlCnjgzMDAAAAACCIE/5PPvnE1Nb/5S9/kc2bN8stt9wizzzzjPTo0cOsP3LkiJw8edI047dERERIw4YNJSEhwST8+qjN+K1kX+n2efLkMS0COnTocN37Jicnm8WSlJTk9XMFAgl9FQEAAAD/59dN+n/44QeZMWOGVKpUSdasWSO9e/eWv//97zJnzhyzXpN9pTX6rvS5tU4fS5Uq5bY+NDRUSpQo4dwmtbFjx5qCA2vRVgYAAAAAAAQSv074U1JS5M4775RXX31V6tata/rda+2+9tf3piFDhsi5c+ecy7Fjx7z6fgAAAAAABFWTfh15v1q1am6vVa1aVf71r3+Zn6OioszjqVOnzLYWfV6nTh3nNqdPn3bbx9WrV83I/dbvp1agQAGzAMh9dBcAAAAAgqCGX0foP3DggNtr33//vZQvX945gJ8m7evXr3frb69982NiYsxzfTx79qzs3LnTuc2GDRtM6wHt6w8AAAAAgB35dQ3/gAED5O677zZN+v/v//5Ptm/fLu+8845ZVEhIiPTv319eeeUV089fCwCGDRtmRt5v3769s0XAgw8+6OwKcOXKFenbt68Z0I8R+gEAAAAAduXXCX+DBg1k6dKlpk/9qFGjTEKv0/B17tzZuc3gwYPlwoULpn+/1uQ3btzYTMMXFhbm3Gb+/PkmyW/evLkZnb9Tp04yZcoUH50VAAAAAADeF+LQyeyRIe0moKP16wB+4eHhvj4cwO/72XsTffiB/yI2eR7XFDnF+DMA/DUu+XUffgAAAAAAkDMk/AAAAAAA2BAJPwAAAAAANuTXg/YBQGr0kwQAAACyhhp+AAAAAABsiIQfAAAAAAAbIuEHAAAAAMCGSPgBAAAAALAhEn4AAAAAAGyIUfoB5Gg0fAAAAAD+jRp+AAAAAABsiIQfAAAAAAAbIuEHAAAAAMCGSPgBAAAAALAhEn4AAAAAAGyIhB8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4AQAAAACwIRJ+AAAAAABsiIQfAADkmnHjxklISIj079/f+dqlS5ekT58+UrJkSSlSpIh06tRJTp065fZ7R48elTZt2kihQoWkVKlSMmjQILl69arbNps2bZI777xTChQoIBUrVpT4+PhcOy8AAPwRCT8AAMgViYmJ8vbbb0utWrXcXh8wYIAsX75cFi9eLJs3b5bjx49Lx44dneuvXbtmkv3Lly/
Ll19+KXPmzDHJ/PDhw53bHDlyxGzTtGlT2b17tylQeOqpp2TNmjW5eo4AAPgTEn4AAOB158+fl86dO8u7774rxYsXd75+7tw5mTVrlkyYMEGaNWsm9erVk9mzZ5vEfuvWrWabtWvXyv79+2XevHlSp04dadWqlYwePVqmTZtmCgHUzJkzpUKFCvLmm29K1apVpW/fvvLII4/IxIkTfXbOAAD4Ggk/AADwOm2yrzXwLVq0cHt9586dcuXKFbfXq1SpIuXKlZOEhATzXB9r1qwpkZGRzm1iY2MlKSlJ9u3b59wm9b51G2sfaUlOTjb7cF0AALCTUF8fAAAAsLeFCxfKrl27TJP+1E6ePCn58+eXYsWKub2uyb2us7ZxTfat9da6jLbRJP7PP/+UggULXvfeY8eOlZEjR3rgDAEA8E/U8AMAAK85duyYPPvsszJ//nwJCwsTfzJkyBDTpcBa9FgBALATEn4AAOA12mT/9OnTZvT80NBQs+jAfFOmTDE/ay289sM/e/as2+/pKP1RUVHmZ31MPWq/9TyzbcLDw9Os3Vc6mr+ud10AALATmvQDsJXu8dc3GXY1q1uDXDsWACLNmzeXvXv3ur32xBNPmH76L7zwgkRHR0u+fPlk/fr1Zjo+deDAATMNX0xMjHmuj2PGjDEFBzoln1q3bp1J0KtVq+bcZuXKlW7vo9tY+wAAIBj5dQ3/yy+/bObqdV30BsHT8/YCAADvKFq0qNSoUcNtKVy4sInd+nNERIR0795dBg4cKBs3bjQtArRAQBP1Ro0amX20bNnSJPZdunSRr7/+2ky1N3ToUHMPoLX0qlevXvLDDz/I4MGD5bvvvpPp06fLokWLzJR/AAAEK7+v4a9evbp89tlnzufa/M+iQfzTTz818/bqDYNOwaPz9n7xxRdu8/ZqMz+d3ufEiRPStWtXU5Pw6quv+uR8AACAO506L0+ePKbgXkfO19H1NWG35M2bV1asWCG9e/c2BQFaYBAXFyejRo1ybqNT8uk9gd4bTJ48WcqWLSvvvfee2RcAAMEqxOFwOMSPa/iXLVsmu3fvvm6dDq5z8803y4IFC8w8u0pL9HXuXZ2CR2sFVq1aJQ899JAcP37cOXKvztOrTQh/+eUXMypwVugIv1qgoO9J/z4Ei8yaxgcqmvTDLohNnsc1RU7RnQyAv8Ylv6/hP3jwoJQpU8aM7Kul+jqFjs7Nm9m8vZrwpzdvr9YQ6Ly9devWTfM9tXZBFwvz8sKO7JrQAwAAAAiAPvwNGzaU+Ph4Wb16tcyYMUOOHDki9957r/zxxx8em7c3LVqooCUp1qIDCgEAAAAAEEj8uoa/VatWzp9r1aplCgDKly9vBuFJb4odT83Lq4MHudbwk/QDAAAAAAKJX9fwp6a1+XfccYccOnTIDMTniXl708K8vAAAAACAQBdQCf/58+fl8OHDUrp0aalXr55z3l5LWvP26ty/Om+vJfW8vQAAAAAA2JFfN+l//vnnpW3btqYZv460P2LECDM1z2OPPeY2b2+JEiVMEt+vX7905+0dP3686befet5eAAAAAADsyK8T/p9++skk97/99puZgq9x48aydetW87On5u0FAAAAAMCO/DrhX7hwYYbrdaq+adOmmSU92jpg5cqVXjg6AAAAAAD8V0D14QcAAAAAAFlDwg8AAAAAgA35dZN+ADnXPT7R14cAAAAAwIeo4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4AQAAAACwIRJ+AAAAAABsKNTXBwAAual7fGK662Z1a5CrxwIAAAB4EzX8AAAAAADYEAk/AAAAAAA2RJN+AMhCc39Fk38AAAAEEmr4AQAAAACwIRJ+AAAAAABsiIQfAAAAAAAbIuEHAAAAAMCGSPgBAAAAALAhEn4AAAAAAGyIhB8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsKFQXx8AAACAHXWPT8xw/axuDXLtWAA
AwYkafgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4AQAAAACwIRJ+AAAAAABsiIQfAAAAAAAbIuEHAAAAAMCGSPgBAAAAALChgEr4x40bJyEhIdK/f3/na5cuXZI+ffpIyZIlpUiRItKpUyc5deqU2+8dPXpU2rRpI4UKFZJSpUrJoEGD5OrVqz44AwAAAAAAckfAJPyJiYny9ttvS61atdxeHzBggCxfvlwWL14smzdvluPHj0vHjh2d669du2aS/cuXL8uXX34pc+bMkfj4eBk+fLgPzgIAAAAAgNwREAn/+fPnpXPnzvLuu+9K8eLFna+fO3dOZs2aJRMmTJBmzZpJvXr1ZPbs2Sax37p1q9lm7dq1sn//fpk3b57UqVNHWrVqJaNHj5Zp06aZQoC0JCcnS1JSktsCAAAAAEAgCYiEX5vsay19ixYt3F7fuXOnXLlyxe31KlWqSLly5SQhIcE818eaNWtKZGSkc5vY2FiTxO/bty/N9xs7dqxEREQ4l+joaK+dGwAAAAAAQZnwL1y4UHbt2mWS8NROnjwp+fPnl2LFirm9rsm9rrO2cU32rfXWurQMGTLEtB6wlmPHjnnwjADP6R6fmO4CAP5gxowZpjteeHi4WWJiYmTVqlUeH4tn06ZNcuedd0qBAgWkYsWKpvseAADBzq8Tfk20n332WZk/f76EhYXl2vvqzYJ1Y2ItAAAg+8qWLWsG3dVWeTt27DBd8Nq1a+dsZeeJsXiOHDlitmnatKns3r3bDO771FNPyZo1a3xyzgAA+Au/Tvj15uD06dOmxD40NNQsejMwZcoU87PW1OsNwNmzZ91+T2sGoqKizM/6mLqmwHpubQMAALyjbdu20rp1a6lUqZLccccdMmbMGFOTr2PteGosnpkzZ0qFChXkzTfflKpVq0rfvn3lkUcekYkTJ/r47AEA8C2/TvibN28ue/fuNaX11lK/fn0zgJ/1c758+WT9+vXO3zlw4IBp+qdNBpU+6j604MCybt06U2tfrVo1n5wXAADBSGvrtavehQsXTHz21Fg8uk3qcX50G2sf6WGQXgCA3YWKHytatKjUqFHD7bXChQubfn7W6927d5eBAwdKiRIlTBLfr18/cxPRqFEjs75ly5Ymse/SpYuMHz/e9NsfOnSo6S+oTfcBAIB3acG7xmbtr6+1+0uXLjWxWQvvPTEWT3rbaAL/559/SsGCBdM8Lh0faOTIkR49VwAA/Ilf1/BnhTbXe+ihh8wgP/fdd59ppr9kyRLn+rx588qKFSvMo95sPP7449K1a1cZNWqUT48bAIBgUblyZZPcb9u2TXr37i1xcXGmmb6vMUgvAMDu/LqGPy06Cq8rHcxP+/Hpkp7y5cvLypUrc+HoAABAalqLryPnK+2nn5iYKJMnT5a//vWvzrF4XGv5U4/Fs3379gzH4klvvB5t+Zde7b7Sln609gMA2FnAJfwA4CuZTXc4q1uDXDsWIJClpKSY/vOa/Ftj8WhLvfTG4tGB/nQsHp2SL62xeHSb1AX7uo21DwAAghUJPwAA8GqzeR1ZXwfi++OPP2TBggWmtZ5OmRcREeGRsXh69eolb731lgwePFiefPJJ2bBhgyxatEg+/fRTH589AAC+RcIPAAC8RmvmdeycEydOmAS/Vq1aJtl/4IEHnGPx5MmTx9Twa62/jq4/ffr068bi0b7/WhCgg/fqGACuY/HolHya3A8YMMB0FShbtqy89957Zl8AAAQzryX8P/zwg9x2223e2j0AAPAyT8TyWbNmZbjeU2PxNGnSRL766qscHycAAHbktVH6dXCepk2byrx588w0PAAAILAQywEACGxeS/h37dplmu1pvzwdPffpp5++bpRdAADgv4jlAAAENq8l/HXq1DH96I4fPy7vv/++6bvXuHFjqVGjhkyYMEF++eUXb701AADwAGI5AACBzWsJvyU0NFQ6duwoixcvltdee00OHTokzz//vERHRzsH8QEAAP6LWA4
AQGDyesK/Y8cOeeaZZ6R06dKmNkBvEA4fPmzmx9Uag3bt2nn7EAAAwA0glgMAEJi8Nkq/3hDMnj1bDhw4IK1bt5a5c+eaR516x5pCJz4+Xm699VZvHQIA5Kru8YnprpvVrUGuHgvgCcRyAAACm9cS/hkzZsiTTz4p3bp1MzUCaSlVqlSm0/UAAADfIJYDABDYvJbwazO/cuXKOWsBLA6HQ44dO2bW5c+fX+Li4rx1CICta4wBwNuI5QAABDav9eG//fbb5ddff73u9TNnzpgmgAAAwL8RywEACGxeS/i19D8t58+fl7CwMG+9LQAA8BBiOQAAgc3jTfoHDhxoHkNCQmT48OFSqFAh57pr167Jtm3bzLy+AADAPxHLAQCwB48n/F999ZWzVmDv3r2mb59Ff65du7aZzgcAAPgnYjkAAPbg8YR/48aN5vGJJ56QyZMnS3h4uKffAgAAeBGxHAAAe/DaKP06by8AAAhcxHIAAAKbRxP+jh07Snx8vKkJ0J8zsmTJEk++NQAA8ABiOQAA9uHRhD8iIsIM8GP9DAAAAguxHAAA+wj1VtM/mgECABB4iOUAANhHHm/t+M8//5SLFy86n//4448yadIkWbt2rbfeEgAAeBCxHACAwOa1hL9du3Yyd+5c8/PZs2flrrvukjfffNO8PmPGDG+9LQAA8BBiOQAAgc1rCf+uXbvk3nvvNT9/9NFHEhUVZWoG9MZhypQp3npbAADgIcRyAAACm9cSfm0CWLRoUfOzNv3TkX7z5MkjjRo1MjcLAADAvxHLAQAIbF5L+CtWrCjLli2TY8eOyZo1a6Rly5bm9dOnT5upfgAAgH8jlgMAENi8lvAPHz5cnn/+ebn11lulYcOGEhMT46whqFu3rrfeFgAAeAixHACAwObRaflcPfLII9K4cWM5ceKE1K5d2/l68+bNpUOHDt56WwAA4CHEcgAAApvXEn6lg/vo4kpH+AUAAIGBWA4AQODyWsJ/4cIFGTdunKxfv9709UtJSXFb/8MPP3jrrQEAgAcQywEACGxeS/ifeuop2bx5s3Tp0kVKly4tISEh3norAADgBcRyAAACm9cS/lWrVsmnn34q99xzT473MWPGDLP85z//Mc+rV69uBhBq1aqVeX7p0iV57rnnZOHChZKcnCyxsbEyffp0iYyMdO7j6NGj0rt3b9m4caMUKVJE4uLiZOzYsRIa6tXeDAAABDxPxHIAAGDDUfqLFy8uJUqUuKF9lC1b1jQl3Llzp+zYsUOaNWsm7dq1k3379pn1AwYMkOXLl8vixYtNDcTx48fNHMGWa9euSZs2beTy5cvy5Zdfypw5cyQ+Pt4UGgAAAO/HcgAAYMOEf/To0SaxvnjxYo730bZtW2ndurVUqlRJ7rjjDhkzZoyppd+6daucO3dOZs2aJRMmTDAFAfXq1ZPZs2ebxF7XW9MG7d+/X+bNmyd16tQxLQP0uKZNm2YKAQAAgHdjOQAA8B2vtWt/88035fDhw6Z5vc7fmy9fPrf1u3btytb+tLZea/J1ACGdB1hr/a9cuSItWrRwblOlShUpV66cJCQkSKNGjcxjzZo13Zr4a7N/beKvrQTSm0NYuwfoYklKSsrWsQJZ1T0+0deHAAC5FssBAIBNEv727dt7ZD979+41Cb7219fa/aVLl0q1atVk9+7dkj9/filWrJjb9npTcvLkSfOzProm+9Z6a116tI//yJEjPXL8AAAEKk/FcgAAYLOEf8SIER7ZT+XKlU1yr034P/roIzPonvbX96YhQ4bIwIED3Wr4o6OjvfqeAAD4G0/FcgAAYLM+/Ors2bPy3nvvmQT6zJkzzuZ/P//8c5b3obX4FStWNH30tea9du3aMnnyZImKijL98PU9XJ06dcqsU/qoz1Ovt9alp0CBAhIeHu62AAAQjDwRywEAgM0S/j179piB9l577TV54403nIn5kiVLzE1DTqWkpJj+9VoAoH0J169f71x34MABMw2fdgFQ+qh
dAk6fPu3cZt26dSaB124BAAAg92M5AAAI8IRfm8R369ZNDh48KGFhYc7XddT9LVu2ZGkfejOh2/7nP/8xibs+37Rpk3Tu3FkiIiKke/fu5n02btxoBvF74oknTJKvA/apli1bmsS+S5cu8vXXX8uaNWtk6NCh0qdPH1OLDwAAvBvLAQCADfvwJyYmyttvv33d67fcckuGA+a50pr5rl27yokTJ0yCX6tWLZO0P/DAA2b9xIkTJU+ePNKpUydT668j8E+fPt35+3nz5pUVK1aYUfm1IKBw4cJmDIBRo0Z58EwBALAnT8RyAABgw4Rfa9DTms7u+++/l5tvvjlL+5g1a1aG67W2Ydq0aWZJT/ny5WXlypVZej8A8NcpGmd1a5BrxwJ4MpYDAAAbNul/+OGHTU36lStXzPOQkBDTv/6FF14wNfIAAMC/EcsBAAhsXkv433zzTTl//rypAfjzzz/l/vvvN6PtFy1aVMaMGeOttwUAAB5CLAcAILB5rUm/9rnXEfG/+OILM2Ce3jDceeed0qJFC2+9JQAA8CBiOQAAgc0rCb9OnRcfH2+m7dER9rUJYIUKFSQqKkocDod5DgAA/BexHACAwOfxJv16E6B9/p566in5+eefpWbNmlK9enX58ccfzdQ+HTp08PRbAgAADyKWAwBgDx6v4dfaAJ2bd/369dK0aVO3dRs2bJD27dvL3LlzzXR7AADA/xDLAQCwB4/X8H/wwQfy0ksvXXeDoJo1ayYvvviizJ8/39NvCwAA/DCWjx07Vho0aGAG+itVqpQpLDhw4IDbNpcuXZI+ffpIyZIlpUiRImYGgFOnTrlto7MDtGnTRgoVKmT2M2jQILl69arbNps2bTJjDOh0gjq4oBZcAAAQzDye8O/Zs0cefPDBdNe3atXKDPwDAAD8kydj+ebNm00yv3XrVjMAoE7x17JlS7lw4YJzmwEDBsjy5ctl8eLFZvvjx49Lx44dneuvXbtmkv3Lly/Ll19+KXPmzDHJ/PDhw53bHDlyxGyjhRS7d++W/v37my4Ja9asyfF1AAAg0Hm8Sf+ZM2ckMjIy3fW67vfff/f02wIAAD+M5atXr3Z7rom61tDv3LlT7rvvPjl37pzMmjVLFixYYFoPqNmzZ0vVqlVNIUGjRo1k7dq1sn//fvnss8/Me9epU0dGjx4tL7zwgrz88suSP39+mTlzphlUUKcSVPr7n3/+uUycOFFiY2Nv6HoAABCoPF7Dr6XwoaHplyPkzZv3uiZ4AADAf3gzlmuCr0qUKGEeNfHXWn/Xqf6qVKki5cqVk4SEBPNcH3XgQNdCCE3ik5KSZN++fc5tUk8XqNtY+0hLcnKy2YfrAgCAnYR6Y2RfHcFX+8+lF1wBAID/8lYs16n+tKn9PffcIzVq1DCvnTx50tTQFytWzG1bTe51nbVN6hYH1vPMttEk/s8//5SCBQumOb7AyJEjc3QuAAAEZcIfFxeX6TaM6gsAgP/yVizXvvzffPONaWrvD4YMGSIDBw50PtfCgejoaJ8eEwAAfp3wa787AAAQuLwRy/v27SsrVqww0/2VLVvW+XpUVJQZjO/s2bNutfw6Sr+us7bZvn272/6sUfxdt0k9sr8+Dw8PT7N2X2kLhvRaMQAAYAce78MPAADg2j1Ak/2lS5fKhg0bzMB6rurVqyf58uWT9evXO1/Taft0Gr6YmBjzXB/37t0rp0+fdm6jI/5rMl+tWjXnNq77sLax9gEAQDDyeA0/AACAazN+HYH/448/lqJFizr73EdERJiad33s3r27aVqvA/lpEt+vXz+TqOsI/Uqn8dPEvkuXLjJ+/Hizj6FDh5p9WzX0vXr1krfeeksGDx4sTz75pClcWLRokXz66ac+PX8AAHyJGn4AAOA1M2bMMCPzN2nSREqXLu1cPvzwQ+c2OnXeQw89JJ06dTJT9Wnz/CVLlrjNCqDdAfRRCwIef/xxM4bAqFGjnNtoywFN7rVWv3bt2mZ6vvfee48
p+QAAQY0afgAA4NUm/ZkJCwuTadOmmSU95cuXl5UrV2a4Hy1U+Oqrr3J0nAAA2BE1/AAAAAAA2BAJPwAAAAAANkTCDwAAAACADZHwAwAAAABgQyT8AAAAAADYEAk/AAAAAAA2RMIPAAAAAIANkfADAAAAAGBDob4+ACDQdY9PzHD9rG4Ncu1YAAAA4J/3hdwTwheo4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhvw64R87dqw0aNBAihYtKqVKlZL27dvLgQMH3La5dOmS9OnTR0qWLClFihSRTp06yalTp9y2OXr0qLRp00YKFSpk9jNo0CC5evVqLp8NAAAAAAC5x68T/s2bN5tkfuvWrbJu3Tq5cuWKtGzZUi5cuODcZsCAAbJ8+XJZvHix2f748ePSsWNH5/pr166ZZP/y5cvy5Zdfypw5cyQ+Pl6GDx/uo7MCAAAAAMD7QsWPrV692u25JupaQ79z506577775Ny5czJr1ixZsGCBNGvWzGwze/ZsqVq1qikkaNSokaxdu1b2798vn332mURGRkqdOnVk9OjR8sILL8jLL78s+fPn99HZAUDWMa8vAAAAbFXDn5om+KpEiRLmURN/rfVv0aKFc5sqVapIuXLlJCEhwTzXx5o1a5pk3xIbGytJSUmyb9++NN8nOTnZrHddAAAAAAAIJAGT8KekpEj//v3lnnvukRo1apjXTp48aWroixUr5ratJve6ztrGNdm31lvr0hs7ICIiwrlER0d76awAAAAAAAjyhF/78n/zzTeycOFCr7/XkCFDTGsCazl27JjX3xMAAAAAgKDpw2/p27evrFixQrZs2SJly5Z1vh4VFWUG4zt79qxbLb+O0q/rrG22b9/utj9rFH9rm9QKFChgFgAAAAAAApVf1/A7HA6T7C9dulQ2bNggFSpUcFtfr149yZcvn6xfv975mk7bp9PwxcTEmOf6uHfvXjl9+rRzGx3xPzw8XKpVq5aLZwMAAAAAQO4J9fdm/DoC/8cffyxFixZ19rnXfvUFCxY0j927d5eBAweagfw0ie/Xr59J8nWEfqXT+Gli36VLFxk/frzZx9ChQ82+qcUHAAAAANiVXyf8M2bMMI9NmjRxe12n3uvWrZv5eeLEiZInTx7p1KmTGV1fR+CfPn26c9u8efOa7gC9e/c2BQGFCxeWuLg4GTVqVC6fDQAAAAAAuSfU35v0ZyYsLEymTZtmlvSUL19eVq5c6eGjAwAAAADAf/l1H34AAAAAAJAzJPwAAAAAANgQCT8AAAAAADZEwg8AAAAAgA359aB9gD/oHp/o60MAAAAAgGyjhh8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIhB+wDA5gNLzurWINeOBQAAAP6DGn4AAAAAAGyIhB8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAG2JaPsDHU6YBAAAAgDdQww8AAAAAgA1Rww8AuYCWHgAAAMht1PADAACv2bJli7Rt21bKlCkjISEhsmzZMrf1DodDhg8fLqVLl5aCBQtKixYt5ODBg27bnDlzRjp37izh4eFSrFgx6d69u5w/f95tmz179si9994rYWFhEh0dLePHj8+V8wMAwJ+R8AMAAK+5cOGC1K5dW6ZNm5bmek3Mp0yZIjNnzpRt27ZJ4cKFJTY2Vi5duuTcRpP9ffv2ybp162TFihWmEKFnz57O9UlJSdKyZUspX7687Ny5U15//XV5+eWX5Z133smVcwQAwF/RpB8AAHhNq1atzJIWrd2fNGmSDB06VNq1a2demzt3rkRGRpqWAI8++qh8++23snr1aklMTJT69eubbaZOnSqtW7eWN954w7QcmD9/vly+fFnef/99yZ8/v1SvXl12794tEyZMcCsYAAAg2FDDDwAAfOLIkSNy8uRJ04zfEhERIQ0bNpSEhATzXB+1Gb+V7CvdPk+
ePKZFgLXNfffdZ5J9i7YSOHDggPz+++/pvn9ycrJpHeC6AABgJyT8AADAJzTZV1qj70qfW+v0sVSpUm7rQ0NDpUSJEm7bpLUP1/dIy9ixY00Bg7Vo338AAOyEJv0AI6gDQFAaMmSIDBw40Plca/hJ+gEAdkINPwAA8ImoqCjzeOrUKbfX9bm1Th9Pnz7ttv7q1atm5H7XbdLah+t7pKVAgQJm5H/XBQAAO6GGHwCCvAXLrG4Ncu1YAFcVKlQwCfn69eulTp06zlp27Zvfu3dv8zwmJkbOnj1rRt+vV6+eeW3Dhg2SkpJi+vpb2/zjH/+QK1euSL58+cxrOqJ/5cqVpXjx4j47PwAAfI0afgAA4DXnz583I+brYg3Upz8fPXpUQkJCpH///vLKK6/IJ598Inv37pWuXbuakffbt29vtq9atao8+OCD0qNHD9m+fbt88cUX0rdvXzOCv26n/va3v5kB+7p3726m7/vwww9l8uTJbs31AQAIRtTwAwAAr9mxY4c0bdrU+dxKwuPi4iQ+Pl4GDx4sFy5cMNPnaU1+48aNzTR8YWFhzt/Rafc0yW/evLkZnb9Tp04yZcoU53odcG/t2rXSp08f0wrgpptukuHDhzMlHwAg6JHwAwAAr2nSpIk4HI5012st/6hRo8ySHh2Rf8GCBRm+T61ateTf//73DR0rAAB2Q5N+AAAAAABsyO8T/i1btkjbtm1NPz2tBVi2bJnbeq010GZ7pUuXloIFC0qLFi3k4MGDbtvoSL6dO3c2o+8WK1bM9PHTPoUAAAAAANiV3yf82q+vdu3aMm3atDTXjx8/3vTjmzlzphnVt3DhwhIbGyuXLl1ybqPJvg7ioyP2rlixwhQi0K8PAAAAAGBnft+Hv1WrVmZJi9buT5o0SYYOHSrt2rUzr82dO1ciIyNNSwAdwffbb781g/8kJiZK/fr1zTZTp06V1q1byxtvvOEc4RcAAAAAADvx+xr+jOjUPidPnjTN+F1H6tV5eRMSEsxzfdRm/Fayr3R7HeVXWwSkJTk52cwD7LoAAAAAABBIAjrh12RfaY2+K31urdPHUqVKua0PDQ01I/5a26Q2duxYU3BgLdHR0V47BwAAAAAAgrJJvy8MGTLEOU+w0hp+kv7A1j0+0deHAAAAAAC5KqBr+KOioszjqVOn3F7X59Y6fTx9+rTb+qtXr5qR+61tUitQoIAZ0d91AQAAAAAgkAR0wl+hQgWTtK9fv96tNl775sfExJjn+nj27FnZuXOnc5sNGzZISkqK6esPAAAAAIAd+X2T/vPnz8uhQ4fcBurbvXu36YNfrlw56d+/v7zyyitSqVIlUwAwbNgwM/J++/btzfZVq1aVBx98UHr06GGm7rty5Yr07dvXjODPCP0AAAAAALvy+4R/x44d0rRpU+dzq299XFycxMfHy+DBg+XChQvSs2dPU5PfuHFjMw1fWFiY83fmz59vkvzmzZub0fk7deokU6ZM8cn5AAAAAACQG/w+4W/SpIk4HI5014eEhMioUaPMkh5tDbBgwQIvHSEAAAAAAP7H7xN+AIDvZrGY1a1Brh4LAAAAPCegB+0DAAAAAABpo4Yftq+hBAAAAIBgRA0/AAAAAAA2RMIPAAAAAIANkfADAAAAAGBDJPwAAAAAANgQCT8AAAAAADbEKP0AgBzPgDGrW4NcOxYAAABkDzX8AAAAAADYEDX8sEUtIwAAAADAHTX8AAAAAADYEAk/AAAAAAA2RJN+AECOMagfAACA/6KGHwAAAAAAG6KGH36DgfkAAAAAwHOo4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAG2LQPgCATwbjZMo+AAAA76KGHwAAAAAAG6KGH7mGafcAAAAAIPeQ8AMAAAAAYMNuizTpBwAAAADAhqjhh8fQZB8AAAAA/Ac1/AAAAAAA2BA1/ACAgGw15E/94wAAAPwRCT8AwCfoBgQAAOBdJPzIFm7QAQAAACAwkPD
DDQk9AAAAANgDg/YBAAAAAGBDQVXDP23aNHn99dfl5MmTUrt2bZk6darcddddvj4sAICHMeBf8CLWAwAQhAn/hx9+KAMHDpSZM2dKw4YNZdKkSRIbGysHDhyQUqVKSbCgyT4Au7iRv2cUCNgTsR4AgCBN+CdMmCA9evSQJ554wjzXm4FPP/1U3n//fXnxxRfdtk1OTjaL5dy5c+YxKSlJ/EGf+TszXD+tc710113+87wXjggA7KXLjI05/jubW6yY5HA4fH0oARnrcyPeZxZz/eW+AjeOzxpZ+S7wPbCXy7nwWXsk1juCQHJysiNv3ryOpUuXur3etWtXx8MPP3zd9iNGjNArysLCwsLC4vfLsWPHcjGi2ifWK+I9CwsLC4vYPNYHRQ3/r7/+KteuXZPIyEi31/X5d999d932Q4YMMU0CLSkpKXLmzBkpWbKkhISE5KhkJjo6Wo4dOybh4eE5PIvgxLXLOa5dznHtbgzXL3eunZb2//HHH1KmTJlcOz47xXpvxHtXwf7/gPPn/Dl/zp/zP3bD5++JWB8UCX92FShQwCyuihUrdsP71Q88GL/0nsC1yzmuXc5x7W4M18/71y4iIiJXjseuvBXvXQX7/wPOn/Pn/Dn/YBXuofO/0VgfFNPy3XTTTZI3b145deqU2+v6PCoqymfHBQAAPINYDwBAkCb8+fPnl3r16sn69evdmu3p85iYGJ8eGwAAuHHEegAAgrhJv/bRi4uLk/r165v5eHWqngsXLjhH8vUmbS44YsSI65oNInNcu5zj2uUc1+7GcP1yjmsXuLE+tWD/LDl/zp/z5/w5/wLiD0J05D4JEm+99Za8/vrrcvLkSalTp45MmTLFzNMLAADsgVgPAECQJvwAAAAAAASLoOjDDwAAAABAsCHhBwAAAADAhkj4AQAAAACwIRJ+AAAAAABsiITfy6ZNmya33nqrhIWFmVGCt2/f7utD8jtjx46VBg0aSNGiRaVUqVLSvn17OXDggNs2ly5dkj59+kjJkiWlSJEi0qlTJzl16pTPjtlfjRs3TkJCQqR///7O17h2Gfv555/l8ccfN9enYMGCUrNmTdmxY4dzvY5rOnz4cCldurRZ36JFCzl48KAEu2vXrsmwYcOkQoUK5rrcfvvtMnr0aHO9LFy7/9qyZYu0bdtWypQpY/5/Llu2zG19Vq7TmTNnpHPnzhIeHi7FihWT7t27y/nz53P5TJDVzzQtmzZtkjvvvNNM01SxYkWJj4+XYLoGev66XepFZ1Ow431LWhYvXixVqlQx94Qaa1auXCmBKCfnr9/31J+9XodANGPGDKlVq5b5e6xLTEyMrFq1Kig++5ycv50++6zee/vbd4CE34s+/PBDMyewzsO4a9cuqV27tsTGxsrp06d9fWh+ZfPmzSYh3bp1q6xbt06uXLkiLVu2NHMnWwYMGCDLly83/1l0++PHj0vHjh19etz+JjExUd5++23zR9gV1y59v//+u9xzzz2SL18+E6z2798vb775phQvXty5zfjx4820XjNnzpRt27ZJ4cKFzf9jLUgJZq+99poJ+joF2rfffmue67WaOnWqcxuu3X/p3zL9+68FwGnJynXSZH/fvn3mb+SKFStMstWzZ89cPAtk5zNN7ciRI9KmTRtp2rSp7N6929wYPvXUU7JmzRoJlmtg0cTwxIkTzkUTRjvet6T25ZdfymOPPWYK67766iuTJOvyzTffSDCcv9Lk0PWz//HHHyUQlS1b1iR5O3fuNBUEzZo1k3bt2pm/0Xb/7HNy/nb67LN67+133wGdlg/ecddddzn69OnjfH7t2jVHmTJlHGPHjvXpcfm706dPaxWhY/Pmzeb52bNnHfny5XMsXrzYuc23335rtklISPDhkfqPP/74w1GpUiXHunXrHPfff7/j2WefNa9z7TL2wgsvOBo3bpzu+pSUFEdUVJTj9ddfd76m17RAgQKODz74wBHM2rRp43jyySfdXuvYsaOjc+fO5me
uXdr0/97SpUudz7Nynfbv329+LzEx0bnNqlWrHCEhIY6ff/45l88AmX2maRk8eLCjevXqbq/99a9/dcTGxjqC5Rps3LjRbPf777877H7fkpb/+7//M383XTVs2NDx9NNPO4Lh/GfPnu2IiIhw2FXx4sUd7733XtB99lk5f7t+9n+kc+/tj98Bavi95PLly6bkS5tmWvLkyWOeJyQk+PTY/N25c+fMY4kSJcyjXkctPXa9ltokply5clzL/09L2rX2yPUaKa5dxj755BOpX7++/OUvfzG1THXr1pV3333XrVZOm5u6Xr+IiAjTPSfYr9/dd98t69evl++//948//rrr+Xzzz+XVq1amedcu6zJynXSR23Gr99Vi26vMUVbBMD/6WeY+u+ztuIIxv8LderUMd1XHnjgAfniiy/EjvctwfYdyMr5K+2GVL58eYmOjs60RjiQurctXLjQtG7Qpu3B9tln5fzt+tn3Sefe2x+/A6G58i5B6NdffzX/CSIjI91e1+ffffedz47L36WkpJimjtrMukaNGuY1vRnOnz+/ueFNfS0Dse+fp+kfWu0yos2KUuPaZeyHH34wzdK1681LL71kruHf//53c83i4uKc1yit/8fBfv1efPFFSUpKMgVIefPmNX/vxowZY5qeK65d1mTlOulj6mbPoaGh5uaaaxkY9HNK6zPW/0N//vmnGbvB7jTJ124rWnCVnJws7733njRp0sQUWunYBna6b8nOdyDQ/w9n9fwrV64s77//vmn6rAUEb7zxhik41sRPm4gHmr1795oEV7te6fhIS5culWrVqgXNZ5+d87fbZ5/Zvbc/fgdI+OF3pWXan0VrCpG5Y8eOybPPPmv60NlpAJTcvFHRm89XX33VPNcafv3+6U2pJvxI36JFi2T+/PmyYMECqV69urNfsg7gxbUDkNZNvy4WveE/fPiwTJw4Uf75z39KoAr2+5asnr8mh641wPr5V61a1fR/1gFfA41+lzXuaQL70UcfmbinYxukl/TaTXbO326f/bEAvPemSb+X3HTTTabWK/Vo6Po8KirKZ8flz/r27WsGo9q4caNbiZ9eL+0icfbsWbftuZb/bbKvg0Bq7YjW+Omif3B1ADD9WUsPuXYZ1zilDk4ahI4ePWp+tq4R/4+vN2jQIFPL/+ijj5rRZrt06WIGiNTRmxXXLmuycp30MfVgr1evXjUj93MtA4N+Tml9xjqQVTDU7qfnrrvukkOHDond7luy8x0I5P/D2Tn/1HSwXC1kD9TPX1sC6mwb9erVM3FPB7CcPHly0Hz22Tl/u332OzO599YWj/72HSDh9+J/BP1PoH1cXWsT9XlGfVyCkY73o0FDmwNt2LDBTPPlSq+j/nFwvZY6yq8mZcF+LZs3b26aVWkpq7VojbU2q7Z+5tqlT5sgpp5KSPukaz8zpd9F/WPsev20Ca42QQ3263fx4kXTh9yVFnLq3znFtcuarFwnfdRCO73JsOjfSr3W2tcf/k8/Q9fPWGntULD/X9A4pQWvdrtvsft3ICfnn5omRXr/Eoiff1r077F2VbH7Z5+T87fbZ988k3tvvRfyu+9ArgwNGKQWLlxoRlqOj483oyz37NnTUaxYMcfJkyd9fWh+pXfv3mb0zk2bNjlOnDjhXC5evOjcplevXo5y5co5NmzY4NixY4cjJibGLLhe6pFCuXbp2759uyM0NNQxZswYx8GDBx3z5893FCpUyDFv3jznNuPGjTP/bz/++GPHnj17HO3atXNUqFDB8eeffzqCWVxcnOOWW25xrFixwnHkyBHHkiVLHDfddJMZjdzCtfvfSL5fffWVWTTsTpgwwfz8448/Zvk6Pfjgg466des6tm3b5vj888/NyMCPPfaYD88quGX2mb744ouOLl26OLf/4YcfzN+WQYMGmZlSpk2b5sibN69j9erVjmC5BhMnTnQsW7bM/K3du3eviVN58uRxfPbZZw473rfoues1sHzxxRcm3rzxxhv
mOzBixAgzi45ei2A4/5EjRzrWrFnjOHz4sGPnzp2ORx991BEWFubYt2+fI9DoeemMBBr79G+2PtdZU9auXWv7zz4n52+nzz6r997+9h0g4feyqVOnmmQrf/78Zpq+rVu3+vqQ/I7eKKS16DQeFr3xfeaZZ8y0H3rT1KFDBxNckPkfHa5dxpYvX+6oUaOGKZyrUqWK45133nFbr9OmDRs2zBEZGWm2ad68uePAgQOOYJeUlGS+Z/r3TQP3bbfd5vjHP/7hSE5Odm7DtXOfjiz1ooUmWb1Ov/32m0nwixQp4ggPD3c88cQTJuGCf36m+qh/i1P/Tp06dcz9gP5/cY1xwXANXnvtNcftt99u/l6UKFHC0aRJE1MQbdf7Fj1361pYFi1a5LjjjjvMd0Cnafz0008dwXL+/fv3d94P69+61q1bO3bt2uUIRDolbfny5c253HzzzeZvtpXs2v2zz8n52+mzz+q9t799B0L0n9xpSwAAAAAAAHILffgBAAAAALAhEn4AAAAAAGyIhB8AAAAAABsi4QcAAAAAwIZI+AEAAAAAsCESfgAAAAAAbIiEHwAAAAAAGyLhBwAAAADAhkj4AZtq0qSJ9O/f39eHAQAAvIh4DyAjJPyATS1ZskRGjx6dpW3/85//SEhIiOzevVvs7LfffpMHH3xQypQpIwUKFJDo6Gjp27evJCUl+frQAADIEeJ95rG/bNmy5rzPnj3r68MBch0JP2BTJUqUkKJFi0qguXLlitf2nSdPHmnXrp188skn8v3330t8fLx89tln0qtXL6+9JwAA3kS8z1j37t2lVq1aufJegD8i4YfPvfzyy6bUNbeaveli2bRpk3nvjz76KFfev1u3bnLrrbfmehM/fc9XX31VnnzySXNTUK5cOXnnnXec21aoUME81q1b11yP0qVLS1RUlPk5vWaCei7t27eXkSNHys033yzh4eEmcb58+bJzm9WrV0vjxo2lWLFiUrJkSXnooYfk8OHD19U0fPjhh3L//fdLWFiYzJ8/35TGP/bYY3LLLbdIoUKFpGbNmvLBBx9cd379+vUzx1e8eHGJjIyUd999Vy5cuCBPPPGEOc+KFSvKqlWrnL+j2/Xu3Vvq168v5cuXl+bNm8szzzwj//73v7N9fbWwQI9dzyHQ/18AgN0E072FxmuNwTmJ967HnR5PxPt9+/aZ99P1+hgaGppuvJ83b54MHjzYtMLTgvqbbrrJxHv9Pb1PyEq8t8yYMcPU6j///PM3fJ2BQEXCD4+ykiBr0T/M2nw6NjZWpkyZIn/88YdH3uf48eMmmPtjkzR/PbY333zTJLpfffWVSXI18T1w4IBZt337dvOotd0aVH/55Rez/p///Kd06dIl3X2uX79evv32W3Nzowm5NivUGwKLBuOBAwfKjh07zLYauDt06CApKSlu+3nxxRflzjvvlEGDBpnvyqVLl6RevXry6aefyjfffCM9e/Y0x2Edp2XOnDnmRkBf1+PWY/7LX/4id999t+zatUtatmxpfu/ixYvpflZ6zFrYAADwT9xbZO/YshrvT5w4YWJgVtxovJ8+fbp51M/v73//u/n99OJ9165d5fXXX5dHHnnExHltjq+PqkePHlmO9/v375dRo0bJ3LlzzfF48hoDAcUBeNDs2bMd+rUaNWqU45///Kfj/fffd7z66quOli1bOkJCQhzly5d3fP31126/c+XKFceff/6ZrfdJTEw076Pvlx3JyclmsWzcuNHsZ/HixdnaT06P7fLly45Lly45csP999/vePbZZ83Pet0ff/xx57qUlBRHqVKlHDNmzDDPjxw5Yo75q6++cjRs2NBxzz33ZLr/uLg4R4kSJRwXLlxwvqb7K1KkiOPatWtp/s4vv/xi3mfv3r1u7ztp0iRHmzZtzHGmR9c/99xzbufXuHFj5/OrV686Chcu7OjSpYvztRMnTpj9JyQkuO3r0UcfdRQsWNCsa9u2bba/f9b76e/ptfSGESNGmOM
DgGDHvUXGx3bvvfc6+vTpk+14n1WeiPd16tRxxvvMlC5d2uw7dbzX39fYmJV4r/datWrVMt8X18/k999/9/jnD/g7avjhFa1atZLHH3/cNLUaMmSIrFmzxpQmnz59Wh5++GH5888/ndtqsy4trfcmq8Q3f/78ZvGVfPnymcHifMG1/5qWsGsTQP08UtPXtMldVtSuXds0wbPExMTI+fPn5dixY+b5wYMHTVO92267zTQBtLozHD161G0/WhPh6tq1a2YAIm3ap30TixQpYr5DqX/P9Zzy5s1rmhHq71i02Z91Tq4mTpxoagQ+/vhj0+RQayWySmsxrPfT722wNbvX2hqtkQGA3Ma9Rdq09lrPN7vxPjtuNN5r0/2sxvuTJ09eVyOf3Xiv34+qVaua74udpNdiEcgICT9yTbNmzWTYsGHy448/mv5ZGfWzW7dunbMvmP7xr1y5srz00ktmnTYna9CggflZg77VxE+b/Cntj1ajRg3ZuXOn3HfffSZAWb+bup+da8DRbTQoFi5c2Nw4WEHMosFL+7Gl5rrPzI4trT78mkA+99xzpq+aFgboub7xxhtateu2ne5HR5RftmyZOT/dtnr16qbfXFZokqYD12hQ1JsgHbROk15X2qfvyJEjpmmddeyZ9VHXz/Kuu+4y1/nee+81r23ZssU8tm3bVr777jvT50/f0+rvp7+j19zy7LPPmvfU74bVt0+/Fy+88IJs3LhRtm3bZvodrly50jm6vibqqb83es2WL19umvlrnz4doE/po+7Pop+z3hhq3z49P33UY9+6dWuazUg3b95smkWWKlXKNC10XZf6+mgfQu0ioO+vNz36fViwYIFzvY4XoM0QtV+ldS4DBgxwu1HNKf1+6f+XH374wTSV1O+yNnvVJo2pv0/6HdOmkHrTVLBgQdOkMq3+ptb3Tvta6vdNj9n6zmV3H4sXL5Zq1aqZbfVmce/evWb922+/bfpf6ndE/y95a1wEAPbDvYWYOPv++++7VS643lvo31rt+uCNe4tff/3V3Ftocr1o0SKzP022NW4rvc/Q16zz1uvvGju16f7kyZNNvNfPT49bj1Fnz7HOU/vg6zmlPm59TbstaEFQRESEeX3o0KEmlm/YsMHEHL2f0AKCpk2bOsfy0XPU3/n666+d+8vsGmfGuifQ+5+nn37axEW9B9DuCb///rvbtlrZ0KZNG+eMQbfffrsp9HC9L8rsO5fdfezZs8fcm+g+NN5asVrvbxo2bGjisv5/0AI02A8JP3KV1R987dq16W6jA7voYC/JyckmUdG+aBokv/jiC7Neg4q+rrSvl/Yz10X/GFq0JFn/mNepU0cmTZrk/EOfnjFjxpiEUwOO9i3Tm4IWLVpkOwnLyrG50qCm56Y1zjpd3IQJE8wfXO3Lnlat8+eff24Sz0cffVTGjx9vkvhOnTo5S84zqpWdNm2aOZbOnTubAKsBUAOGBlqrZkKPXZNlvW7WsWuynh4NqvqZatDV39VrrsFV++HpMWmfQU189YZGB+DRa6u0/5/227c89dRT5j31vfU99Wf9zLRkXkvwNSE+dOiQKayYOnWqGTzo559/vm6AHn1PTahbt24tr732mglg6X3HNMHXYK81EkpvFjUwWjcprvSaa1/A4cOHux13WgFfA/CZM2fMDc+4cePMubjeOOkNiJbQa/9DPRdNzPVRbwo8QYO9fpf0Wul3RJPwESNGmMWVfu5awKOfmw7wpJ+bFkTo/4PU9MZJP4O//vWv5vesQqvs7EM/F735jIuLMzfi2hdU/5/r91JvRPUa6/c+ISHBDDYFAFnFvUXG9xaaFGoCqn9j9W+1ck0Ms3JvofHS9bg1/mshhl57PRbdn/at18H3NCm1BgrUQnJdr7X36pVXXnG7t9Drr4XyGu81IdW++npPoQXA1nm6tixwpWMQWLFc7zGsGn6N5VoIpK9rf3wtdNcCe6XbaTzTQhB9P+23n5NrnB4tPNH4pnFO47oWlus
9i2tBi94raIGT3udpHNU4nd79RXrfuezsQwsc9Luvib1+vlpAoJ+1Dpisj3rPpPcrWtii4yZ4akwM+BFf9ymAPfvZaT+o9ERERDjq1q2bbl/liRMnmufa/ysn/ay0r5eumzlzZprrdLFYfbpuueUWR1JSkvP1RYsWmdcnT57sfE37xWk/tsz2mdGx6e+79lNftmyZ2faVV15x2+6RRx4x/RIPHTrkfE23y58/v9tr2mdRX586dWqGffiLFy9utps3b55zvfZtK1u2rOknd+bMGdOnXY9DX4uNjXVkpkOHDmafur320fv0008dkZGRjhdffNH0GdR+fSVLljT95Q8ePOhYv369o0GDBuZ3tN9loUKFHN99952zL6FrH/4BAwY4oqOjHV988YVj3Lhx5lro9u3atXO+f6VKlczv6jZq586d5vl9993ndpz6mtXvT49R+342a9bMXMv33nvPUbVqVTNmwfHjxx1FixZ1+33r+6x9B7XPoCtrnfaHVGfPnjW/r2MgpO436trP/+LFi9ddy7Fjx5pz/PHHH2+oD79+v/R3+vXr5/beem31fF3/T6U+Dh1fokaNGubapL5+efLkcezbt++698vOPgoUKOC8Vurtt982r0dFRbn93xsyZIjbdQUA7i0yPjaNvRp/rP09+eSTbvcWtWvXNtfDurfQv8e67uTJk1m6t9Dj03uFxx57zMQCK943b97cbDd37lwT73XsgP379zuqVatm4oauW7p0qdmHXou0xg5wjff6u0899ZQjb968zvNxvZ+xYrl1njVr1jTHfvjwYfOarp81a9Z1sVz78+s9iGsffo0xeh10XIisXOOsfkfr1atnYqFl/Pjx5vWPP/44w/uAp59+2tznuI7zlNF3Lrv7WLBggfM1695LP6OtW7c6X1+zZg1jGNgUNfzIdVoimVHpodV/XGufU4/mnlVaeqlNsrJKS2Fd57DVEk6dmk6bkHuT7l9r2q2ab4vWhGrsSl2DrTUD2mzLtU+bNhnTJtwZ0VJ5PT+rNltp0zMt7dU+eFrCrrWs2rT6p59+ksTExEyP3eqXpyXDWpqutb9ao2A1o9T+dwsXLjQl7NqcTEvUrRF9tVm31nK7TtnjSpvk6aj9WvutJdZa82/VzGjzQV2s74k2+VdWLbo2F0yP1vhrrYPWWF+9etW0AtBjXrFihfm8//a3v5maDm1K6EpHBdbPKSNac6Pfay1dT91v1LVZqWurAy1N13PRZvH6eWvTRE/QGgbX99bn2p3Ctame63Fo6f+5c+dMTUnqbh5Ka0H0M0stO/vQKRBdu7Pod09pLZLr/z3r9cy+0wDginuL/9Ea5ozuLTp27Gjivdb8Z/XeQv+GV6pUydR4W/HeGh9AWw5qvNem59rqS+8rsnqNXeO93kvo/jTmZ0bPQ1sRau25jhtg0ZYEqWO5fm7WmADaEkFrza0uHWnFqxuhrQNcux9oaz5t/eb6mbvGTv3O6n2Axk69L9LuGVn5zmVnH3quWpNv0fPW/w/aqsGKuYr4a1//G+EDyCUaCKymVWnRQPLee++ZZt6aPGmQ0eCkgTIr06oobVKWnQF0NIi50iCmfZy83ZdYm5FrwHW9IVD6R9ha70r7faem/dFS9w+z+qNZdM55veau10+buWky/q9//cu8T58+fcw116RME/TMWDdW2hxdbxzSojcR2oRdA7om2Vo4oLRZmhWEUvcntAK29idUmmjqzYv2DVSpuxhYA/ToOej5uU4TpDQJtfr2aVO4pUuXmhuuf/zjH87me67XXW9StK+h9mFMPW9xRqzCi8yunRaUaCHGJ598ct3npsd6o/QauN78qDvuuMM8un6ftZBDm1bq90ALUixpDUKY3vlnZx+pv7vWZ6L9S9N6Pa3vNACkJ5jvLbTZvhXzdd9WM37r3sKaZs6KMXoeVqG9HlNW7y00vrrG2CpVqphz1Oun8V67vim9t9Am6G+99ZZJyJUmvdrlTV9PL967dgHTBNZinZs
2Xbdo9zuN5Zq8Wqz7Cb0fcI3l+rMWqOu11+5urt0ZtKuDJ6X+zPU+R4/T9TPX7iXWfVHqCobU9wHpfeeysw/tVpE6LmusJf4GDxJ+5CqtPdY/RPpHNz1aaqmDnmjNrfZ905pb7WekA/No/7zMalqtfXhaeqOxa+DIyjF5Qnrvk1bS7A90oB2tHdaaAk2utQZBa7+1RF37NGalBkC30X78Or5BWlIHLG/w1PdJvysPPPCA6eOv5683S9oHUscj0EGbclrrlF16M6W1M1pTo3Mj682I1kjMnj3bbYDBjM4/u/tI77sbaN9pAP6He4sbY/e/wzpugfbp1/FhdGA7LWTQQor+/fvnWtzN6X1RWt+57O6D+AsSfuQqHQBFadOtjOgfYi1910UTPf1jrTWyGqi1FNnTU6HpdDKp/9jpIHGu08Boabf+kU1NS5Jda1Szc2xa867NrLW23LWW32qOpes9QfejI7RqEHCtycjofbRUOj3a1cA6Xi3RT11i71oqr03nlixZ4jbwjc4EkFp6100DmdYW6Hcho2ur56Dnp/t2LWHXz9GVthDQAYC0KWBqej30+uSkEMFqDqkDFqZ306mDBOnsCDookesgfdodwFP0GmhzPKtWX+l7KqtJvbbq0JsDndLKdZpITdazyhP7AABP4N7ixu4ttJVe6tHotWm4DjinI/B78t7CU7ITy3VE+v/X3n2AR1VtDR9fgUCoAUFDuRSx0YsUAcFCkQjoBY1eC1I0gCCogALixUjTKEgXQREJvFdE8BWuAlIEgSuEKihNiqKgGPAqXTrzPWu9z5lvJiSQhEkmOfP/Pc9xMnP2nDlzEtl7nb332jrCb8qUKX7l9Lr7Th8IxO9ff+e+yRx15IkmF3RGOKanXZSaQBwDoYU5/MgyOuxI76zq8GCd75Ua7f1MzgkonWHD2iuqUqokM2L69Ol+c/+0ctB/oDUzqm9Ap1lpnaXlnCHNyZfYSc+5aQWgd/F12JsvzayrFY/v518N/Rxd11Z7Mxw6h12H42tgr3eKk9MhgKltuo6uztfTClXvLie/m+zcHXbuHvveLdbrpz3Cyel1S2lI+z/+8Q/rAZ88eXKKuQl0HrxvQy/5sfU7+tJzatGihc3j9B1id/DgQeuZ1hwAetc8vfSY2rCKj4+/ZJ36y10P/dl3mGIg+P496fH1ufa+ayPXOQ/9+/Id1qjXIvmwyssJxDEA4GrRtrj6toXO705ez+uUAF1aV+t7vRmQ0r/tGWlbBEp66nItm7zXWqcbatvCVyB+/5on6Ny5c97nuuyvXhPnmqenXZSaQBwDoYUefmQK7QHWO6z6j5z+46sVsvZi6t1enbucPKmZLw0gddidzvXS8jpHW/8R0zlITkI2rSA14cikSZMsyNJ/pDXZSFrmWqdEh3fpsTUxip6vzjHXXlpN1ubQeX9aWevcOA1Cdc62rhnrm+gmveemlaneCdYeBq2watasaUMLtQLToWbJj301SWT0Dr4OG9ekOtrTq99Fk/Xpd02eQ0Bdbmiks1/PWxtamihG50JqT68m/NOGgga+moxOey90KTZNHqQNDe2JSWm4mC4po40GXWJG18LVxoJeH13yR+fvd+vWzXphGjVqZA0Z/fvS17WHWRsk+n5NAKffR+98N2jQwNaXdXq3fe/c67xzZz1mXYpI5xbq9dFGny5ZkxHasNDGlP6d6Plr0iD97jo6QXtKtFdfh/Dr7/TFF1+0hoa+R3vKAzlfTv/f0qGqes31707/X9Thq7pMkpP/QP/f0t4t/VvW89T/x3R5PP2dam9NWgTiGACQHrQtMqdtoXO3k9f5Wi9qHXW5aQwZaVsEUlrrck36q79//T1ou0RH2+noheT5bgLx+9fAW2+u6+9SRx/o35ien06BU+lpF6UmEMdAiAn2MgFwF2dZEmfT5VJ02a177rnHlqHxXZ4mtaVzdOkUXX6tdOnS9n591KVgdu3a5fc+XeJEl34JDw/3W0ZElyCpWrVqiue
X2tI5H330kS0HFhUVZcvT6TJmvkukOUaOHGlLy+hSLrqU24YNGy455uXOLfmyfOr48eO2LI1+zzx58thycyNGjPBbyk3pcXr06HHJOaW2pE9yBw8e9Dz55JOea6+91q6rLmeT0tIrejz9/mmly9zpUkh6TXT5P70WS5Ys8e7XpXYaNGhg11W/Y79+/bxLv+j1d5w4ccLz+OOPe4oWLWr7fK+TLnHz5ptv2u/V+Rxd+mbw4MGeo0ePesudPHnSrlGxYsVsCaG2bdt6du7cacfT5f18ffPNN7b8oJbTZWyaNGniWb16dZqXgkq+LJ/js88+89x+++32fSMjIz233Xab/X05dNmh5s2b2+fq76JLly7eJZB8fx8ZXZavYMGCtkSRs/ShLp2kx9JlEn3p0kX6t6bXs1KlSvbZKX1man93V3sMvW76uv6t+3L+n5w9e3a6vjsA96JtEVpti9Sute+yfOmpy3WZuhdeeMFTqlQp+z3oNU5MTEzXNU7r3+iKFSs8Xbt2tXaKnlO7du08f/zxh1/ZtLaLLvc3d7XHSO33cbk6HzlXmP4n2DcdACCz6NBEXSZIe0wuN9zTDbSXRXtXdM4gAADIGjrtQUcQ6ChHHXUIZCfM4QfgGjqnPzkdVqi5BnwT2wAAAAChgDn8AFxD5+zpPEKdu6hz+XS+p246zzArlu/LLJrMMKWbGb5KliyZZecDAEAo0Lo3pYTCyXM1ANkZAT8A19BENprARxMJ6rD2cuXKyaBBgyxxUU72/PPPW9K/y2F2FgAAgaXJhHWo/uVoQmEgO2MOPwBkc9u3b5cDBw5ctoyuIQ0AAAJHl1Hctm3bZcvoKkGaNR/Irgj4AQAAAABwIYb0p8HFixetd03X5PRdyxsAgGDR+/XHjx+X0qVLW2JKXD3qewCA2+p6Av400Mo/Jyf8AgC41/79+6VMmTLBPg1XoL4HALitrifgTwO90+9c6MjIyGCfDgAAcuzYMQtOnToKV4/6HgDgtrqegD8NnGF9WvnTAAAAZCcMPQ8c6nsAgNvqeib9AQAAAADgQgT8AAAg6H799Vd54oknpHjx4pI/f36pXr26bNiwwS9xUVxcnJQqVcr261KUu3fv9jvGn3/+Ke3atbPe+aJFi0psbKycOHEiCN8GAIDsgYAfAAAE1eHDh6VRo0aSJ08e+eKLL2T79u0ycuRIv7Wthw8fLuPGjZNJkybJ2rVrpWDBghIdHS2nT5/2ltFgX9fMXrJkicybN09WrlwpXbt2DdK3AgAg+MI8esscV0yWUKRIETl69Chz+gAA2YKb6qaXXnpJVq1aJf/5z39S3K9NFV2S6IUXXpAXX3zRXtPvXaJECUlISJBHH31UduzYIVWqVJH169dL3bp1rczChQulVatW8ssvv9j7kztz5oxtyZMjueGaAgByvkDU9fTwAwCAoPrss88sSH/44YclKipKbr31Vpk8ebJ3/969eyUpKcmG8Tu0AVS/fn1JTEy05/qow/idYF9peV23WEcEpCQ+Pt6O42wsyQcAcBsCfgAAEFQ//vijTJw4UW6++WZZtGiRdO/eXZ577jmZNm2a7ddgX2mPvi997uzTR71Z4Cs8PFyKFSvmLZPcgAEDrNfE2XQ5PgAA3IRl+QAAQFBdvHjReuZff/11e649/Fu3brX5+h07dsy0z42IiLANAAC3oocfAAAElWbe1/n3vipXriz79u2zn0uWLGmPBw8e9Cujz519+njo0CG//efPn7fM/U4ZAABCDQE/AAAIKs3Qv3PnTr/Xdu3aJeXLl7efK1SoYEH70qVL/RIZ6dz8hg0b2nN9PHLkiGzcuNFbZtmyZTZ6QOf6AwAQihjSDwAAgqp3795y++2325D+f/zjH7Ju3Tp57733bFNhYWHSq1cvGTZsmM3z1xsAr7zyimXeb9u2rXdEwL333itdunSxqQDnzp2Tnj17Wgb/lDL0AwAQCgj4AWS52IT1qe6b0qlelp4LgOCrV6+ezJkzx5L
oDRkyxAL6MWPGSLt27bxl+vXrJydPnpSuXbtaT37jxo1t2b18+fJ5y3z44YcW5Ddr1syy88fExMi4ceOC9K2A0Ha5ul5R3wNZg4AfAAAE3X333WdbarSXX28G6JYazcg/Y8aMTDpDAAByHubwAwAAAADgQgT8AAAAAAC4EAE/AAAAAAAuFNSA/8KFC5ZlV5Pz5M+fX2688UYZOnSoeDwebxn9OS4uztbo1TLNmzeX3bt3+x1H19jVxD6RkZFStGhRiY2NlRMnTviV+e677+SOO+6w5D5ly5aV4cOHZ9n3BAAAAAAgpJL2vfnmmzJx4kSZNm2aVK1aVTZs2CBPPvmkFClSRJ577jkro4G5ZtjVMs4yPNHR0bJ9+3ZvZl4N9n/77TdZsmSJLcOjx9Asvk7iHl2rt0WLFnazQJfq2bJlizz11FN2c0DLAQAAAAhcFn4A2UNQA/7Vq1dLmzZtpHXr1vb8+uuvl48++sjW33V693VZnoEDB1o5NX36dClRooTMnTvX1tbdsWOHLcuzfv16qVu3rpUZP368tGrVSt566y1be1eX6Tl79qx88MEHkjdvXru5sHnzZhk1ahQBPwAAAJDFWKIXCIEh/bfffrssXbpUdu3aZc+//fZb+frrr6Vly5b2fO/evZKUlGQ98w7t/a9fv74kJibac33Unnon2FdaXtffXbt2rbfMnXfeacG+Q0cJ7Ny5Uw4fPnzJeZ05c8ZGBfhuAAAAAADkJEHt4X/ppZcsmK5UqZLkzp3b5vS/9tprNkRfabCvtEfflz539uljVFSU3/7w8HBbi9e3jE4HSH4MZ98111zjty8+Pl4GDx4c8O8LAAAAAEBI9PDPmjXLhtvrXPtvvvnG5unrMHx9DKYBAwbI0aNHvdv+/fuDej4AAAAAAOSoHv6+fftaL7/OxVfVq1eXn3/+2XrYO3bsKCVLlrTXDx48aFn6Hfq8Vq1a9rOWOXTokN9xz58/b5n7nffro77Hl/PcKeMrIiLCNgAAAAAAcqqg9vD/9ddfNtfelw7tv3jxov2sw/A1INd5/g6dAqBz8xs2bGjP9fHIkSOyceNGb5lly5bZMXSuv1Nm5cqVlsHfoRn9K1aseMlwfgAAAAAA3CCoAf/9999vc/bnz58vP/30k8yZM8cy5z/wwAO2PywsTHr16iXDhg2Tzz77zJbT69Chg2Xeb9u2rZWpXLmy3HvvvdKlSxfL7r9q1Srp2bOnjRrQcurxxx+3hH2xsbGybds2+fjjj2Xs2LHSp0+fYH59AAAAAADcOaRfl8975ZVX5JlnnrFh+RqgP/300xIXF+ct069fPzl58qQtn6c9+Y0bN7Zl+PLly+cto3kANMhv1qyZjRiIiYmRcePG+WX2X7x4sfTo0UPq1Kkj1157rX0GS/IBAAAAANwqzKOL3eOydBqB3jTQBH6RkZHBPh0gx2PtXeDqUTcFHtcUCExdfrVoCwCBq5eCOqQfAAAAAABkDgJ+AAAAAABciIAfAAAAAAAXIuAHAAAAAMCFgpqlHwAAAEDoJebLzPMi6R/w/9HDDwAAAACACxHwAwAAAADgQgT8AAAAAAC4EAE/AAAAAAAuRMAPAAAAAIALkaUfAAAAQLaRXVcHAHIievgBAAAAAHAhevgBAAAAhMQIgSmd6mXpuQDBRg8/AAAAAAAuRA8/AAAAEIJz4entBtyPHn4AAAAAAFyIgB8AAAAAABci4AcAAAAAwIUI+AEAAAAAcCECfgAAEFSDBg2SsLAwv61SpUre/adPn5YePXpI8eLFpVChQhITEyMHDx70O8a+ffukdevWUqBAAYmKipK+ffvK+fPng/BtAADIPoIa8F9//fWXVPC6aaUeyAp++fLlUrt2bYmIiJCbbrpJEhISsvR7AgCAy6tatar89ttv3u3rr7/27uvdu7d8/vnnMnv2bFmxYoUcOHBAHnzwQe/+CxcuWFvg7Nmzsnr1apk2bZrV9XFxcUH6NgAAZA9BXZZv/fr1Vkk
7tm7dKvfcc488/PDD3gp+/vz5VsEXKVJEevbsaRX8qlWr/Cr4kiVLWgWvDYQOHTpInjx55PXXX7cye/futTLdunWTDz/8UJYuXSqdO3eWUqVKSXR0dJC+OQAA8BUeHm71eXJHjx6VKVOmyIwZM6Rp06b22tSpU6Vy5cqyZs0aadCggSxevFi2b98uX375pZQoUUJq1aolQ4cOlf79+9vogbx586b4mWfOnLHNcezYsUz8hgAAhFgP/3XXXWeVu7PNmzdPbrzxRrnrrru8FfyoUaOsgq9Tp45V8BrYawWvnAr+X//6l1XuLVu2tAp+woQJdpdfTZo0SSpUqCAjR460xoHeNHjooYdk9OjRwfzqAADAx+7du6V06dJyww03SLt27WwEn9q4caOcO3dOmjdv7i2rw/3LlSsniYmJ9lwfq1evbsG+Q2/qawC/bdu2VD8zPj7eOhScrWzZspn6HQEACNk5/Bqga+D+1FNP2bD+QFXwWsb3GE4Z5xgp0bv9egzfDQAAZI769evbEPyFCxfKxIkTbXTeHXfcIcePH5ekpCTroS9atKjfe7Tu131KH33bAs5+Z19qBgwYYB0MzrZ///5M+X4AAITkkH5fc+fOlSNHjkinTp3seaAq+NTKaBB/6tQpyZ8/f4p3/AcPHhzgbwgAAFKiI/QcNWrUsBsA5cuXl1mzZqVYTweK5vbRDQAAt8o2Pfw6fF8rfB3OF2zc8QcAIHj0Zv8tt9wie/bssSl/OgpQOwV8aRJfZ86/PiZP6us8TykvAAAAoSJbBPw///yzJdrRZHqOQFXwqZWJjIxMtddA7/brft8NAABkjRMnTsgPP/xgCXY1h48m49Wku46dO3faHP+GDRvac33csmWLHDp0yFtmyZIlVn9XqVIlKN8BAIDsIFsE/JqMT5fU02z6jkBV8FrG9xhOGecYAAAguF588UVbbu+nn36y5LwPPPCA5M6dWx577DFLphcbGyt9+vSRr776ynL8PPnkk1aPa4Z+1aJFC6v327dvL99++60sWrRIBg4caEv7MmQfABDKgj6H/+LFixbwd+zY0ZbkcfhW8MWKFbMg/tlnn021gh8+fLjN109ewetyfG+//bb069fPEgIuW7bM5gTqcn8AACD4fvnlFwvu//jjD1vBp3HjxrYij/6sdGWdXLlySUxMjCXW1eS777zzjvf9enNAV/rp3r27tRMKFixo7YohQ4YE8VsBWSM2YX2wTwFANhb0gF+H8muvvQbjyQWigtcl+TS47927t4wdO1bKlCkj77//vh0LAAAE38yZMy+7P1++fLbkrm6p0SR/CxYsyISzAwAg5wp6wK+99B6PJ1Mr+Lvvvls2bdp01ecKAAAAAEBOkS3m8AMAAAAAgMAi4AcAAAAAwIUI+AEAAAAAcCECfgAAAAAAXIiAHwAAAAAAFyLgBwAAAADAhQj4AQAAAABwIQJ+AAAAAABciIAfAAAAAAAXIuAHAAAAAMCFCPgBAAAAAHAhAn4AAAAAAFwoPNgnAAAAACDrxSasD/YpAMhk9PADAAAAAOBCBPwAAAAAALgQAT8AAAAAAC5EwA8AAAAAgAuRtA8AAABASLhSosIpnepl2bkAWYEefgAAAAAAXIiAHwAAAAAAFyLgBwAAAADAhYIe8P/666/yxBNPSPHixSV//vxSvXp12bBhg3e/x+ORuLg4KVWqlO1v3ry57N692+8Yf/75p7Rr104iIyOlaNGiEhsbKydOnPAr891338kdd9wh+fLlk7Jly8rw4cOz7DsCAAAAABBSAf/hw4elUaNGkidPHvniiy9k+/btMnLkSLnmmmu8ZTQwHzdunEyaNEnWrl0rBQsWlOjoaDl9+rS3jAb727ZtkyVLlsi8efNk5cqV0rVrV+/+Y8eOSYsWLaR8+fKyceNGGTFihAwaNEjee++9LP/OAAAAAAC4Pkv/m2++ab3tU6dO9b5WoUIFv979MWPGyMCBA6VNmzb22vTp06VEiRIyd+5
cefTRR2XHjh2ycOFCWb9+vdStW9fKjB8/Xlq1aiVvvfWWlC5dWj788EM5e/asfPDBB5I3b16pWrWqbN68WUaNGuV3Y8Bx5swZ23xvGAAAAAAAkJMEtYf/s88+syD94YcflqioKLn11ltl8uTJ3v179+6VpKQkG8bvKFKkiNSvX18SExPtuT7qMH4n2FdaPleuXDYiwClz5513WrDv0FECO3futFEGycXHx9vnOJvelAAAAAAAICcJag//jz/+KBMnTpQ+ffrIyy+/bL30zz33nAXmHTt2tGBfaY++L33u7NNHvVngKzw8XIoVK+ZXxnfkgO8xdZ/vFAI1YMAAOyffHn6CfgAAAMDdYhPWX3b/lE71suxcgBwf8F+8eNF65l9//XV7rj38W7dutfn6GvAHS0REhG0AAAAAAORUQR3Sr5n3q1Sp4vda5cqVZd++ffZzyZIl7fHgwYN+ZfS5s08fDx065Lf//Pnzlrnft0xKx/D9DAAAAAAA3CSoAb9m6Nd59L527dpl2fSVDsPXgHzp0qV+w+t1bn7Dhg3tuT4eOXLEsu87li1bZqMHdK6/U0Yz9587d85bRjP6V6xY8ZLh/AAAAAAAuEFQA/7evXvLmjVrbEj/nj17ZMaMGbZUXo8ePWx/WFiY9OrVS4YNG2YJ/rZs2SIdOnSwzPtt27b1jgi49957pUuXLrJu3TpZtWqV9OzZ0zL4azn1+OOPW16A2NhYW77v448/lrFjx/rN0wcAAAAAwE2COoe/Xr16MmfOHEuSN2TIEOvR12X42rVr5y3Tr18/OXnypC2fpz35jRs3tmX48uXL5y2jy+5pkN+sWTPLzh8TEyPjxo3z7tdM+4sXL7YbCXXq1JFrr71W4uLiUlySDwAAAAAANwjz6GL3uCydRqA3DY4ePSqRkZHBPh3A1RlwyX4LpA11U+BxTeHGrPIILNopyGn1UlCH9AMAAPh64403vFP6HKdPn7ZResWLF5dChQrZSL7kyXg14W/r1q2lQIECtlxv3759LYkvAAChjIAfAABkC+vXr5d3331XatSocUnOn88//1xmz54tK1askAMHDsiDDz7o3X/hwgUL9s+ePSurV6+WadOmSUJCgk3fAwAglBHwAwCAoDtx4oTl8Jk8ebLfCjo6jHHKlCkyatQoadq0qeXimTp1qgX2mvhXaZ6e7du3y7/+9S+pVauWtGzZUoYOHSoTJkywmwAAAIQqAn4AABB0OmRfe+mbN2/u97ouu6vL6vq+XqlSJSlXrpwkJibac32sXr26lChRwlsmOjra5j7q6jypOXPmjJXx3QAAcJOgZukHAACYOXOmfPPNNzakP7mkpCRbWrdo0aJ+r2twr/ucMr7BvrPf2Zea+Ph4GTx4cIC+BQAA2Q89/AAAIGj2798vzz//vC2x67vkblbQZYF1yoCz6bkAAOAmBPwAACBodMj+oUOHpHbt2hIeHm6bJuYbN26c/aw99ToP/8iRI37v0yz9JUuWtJ/1MXnWfue5UyYlERERtsyR7wYAgJsQ8AMAgKBp1qyZbNmyRTZv3uzd6tatawn8nJ/z5MkjS5cu9b5n586dtgxfw4YN7bk+6jH0xoFjyZIlFsBXqVIlKN8LAIDsgDn8AAAgaAoXLizVqlXze61gwYJSvHhx7+uxsbHSp08fKVasmAXxzz77rAX5DRo0sP0tWrSwwL59+/YyfPhwm7c/cOBASwSovfgAAIQqAn4AAJCtjR49WnLlyiUxMTGWWV8z8L/zzjve/blz55Z58+ZJ9+7d7UaA3jDo2LGjDBkyJKjnDQBAsBHwAwCAbGX58uV+zzWZ34QJE2xLTfny5WXBggVZcHYAAOQczOEHAAAAAMCFCPgBAAAAAHAhAn4AAAAAAFyIgB8AAAAAABci4AcAAAAAwIUI+AEAAAAAcCECfgAAAAAAXIiAHwAAAAAAFyLgBwAAAADAhYIa8A8aNEjCwsL8tkqVKnn3nz59Wnr06CHFixeXQoUKSUx
MjBw8eNDvGPv27ZPWrVtLgQIFJCoqSvr27Svnz5/3K7N8+XKpXbu2REREyE033SQJCQlZ9h0BAAAAAMgxAf+PP/4YsBOoWrWq/Pbbb97t66+/9u7r3bu3fP755zJ79mxZsWKFHDhwQB588EHv/gsXLliwf/bsWVm9erVMmzbNgvm4uDhvmb1791qZJk2ayObNm6VXr17SuXNnWbRoUcC+AwAAoSiQ7QEAABB44Rl5k/aS33XXXRIbGysPPfSQ5MuXL+MnEB4uJUuWvOT1o0ePypQpU2TGjBnStGlTe23q1KlSuXJlWbNmjTRo0EAWL14s27dvly+//FJKlCghtWrVkqFDh0r//v1t9EDevHll0qRJUqFCBRk5cqQdQ9+vNxVGjx4t0dHRGT5vAABCXSDbAwBSFpuwPtinACDUevi/+eYbqVGjhvTp08eC9aefflrWrVuXoRPYvXu3lC5dWm644QZp166dDdFXGzdulHPnzknz5s29ZXW4f7ly5SQxMdGe62P16tUt2HdoEH/s2DHZtm2bt4zvMZwyzjFScubMGTuG7wYAADKvPQAAALJJwK896WPHjrUh9h988IENxW/cuLFUq1ZNRo0aJb///nuajlO/fn0bgr9w4UKZOHGiDb+/44475Pjx45KUlGQ99EWLFvV7jwb3uk/po2+w7+x39l2ujAbxp06dSvG84uPjpUiRIt6tbNmy6bg6AACEhkC1BwAAQDZM2qfD8XVOvc6xf/PNN2XPnj3y4osvWoDcoUMHq/gvp2XLlvLwww9b74D2ui9YsECOHDkis2bNkmAaMGCATSlwtv379wf1fAAAyM6utj0AAACyYcC/YcMGeeaZZ6RUqVJ2J18r9x9++EGWLFlid/vbtGmTruNpb/4tt9xiDQUdGqjJ+PQGgC/N0u/M+dfH5Fn7nedXKhMZGSn58+dP8Tw0m7/u990AAEDWtAcAAEAQA36tzHXu/O23324V+fTp0+Xnn3+WYcOGWYI8HZavQ/V1bl96nDhxwhoI2mCoU6eO5MmTR5YuXerdv3PnTpvj37BhQ3uuj1u2bJFDhw55y2jjQgP0KlWqeMv4HsMp4xwDAABkTGa1BwAAQBCz9Ot8+6eeeko6depkwXlKoqKiLMv+5WgPwP333y/ly5e3hsKrr74quXPnlscee8zmzmvWX00EVKxYMQvin332WQvUNUO/atGihQX27du3l+HDh9t8/YEDB0qPHj2sl15169ZN3n77benXr5+d87Jly2zKwPz58zPy1QEAQIDbAwAAIBsF/NpDrtnyc+XyHyDg8Xhsvrvu04R7HTt2vOxxfvnlFwvu//jjD7nuuuss0Y8uuac/K106Tz8jJibGMufrPP933nnH+369OTBv3jzp3r273QgoWLCgfeaQIUO8ZbSHQYP73r17W2KhMmXKyPvvv8+SfAAAXKVAtQcAwC3LJE7pVC/LzgVIizCP1srppIG2JuDRu/a+NHDX1y5cuCBuohn9dcSBJvBjPj+QuZUlFSWQc+omt7UHssM1BdIbYCJ7oR2D7FYvZWgOf2r3CHQOfr58+TJ0IgAAIGehPQAAgIuG9Ot8ehUWFiZxcXFSoEAB7z69i7927VpbkxcAALgX7QEAAFwY8G/atMl7R1+z4+u8PIf+XLNmTUvEBwAA3Iv2AAAALgz4v/rqK3t88sknLQEe89sAAAg9tAcAAHBxlv6pU6cG/kwAAECOQnsAAACXBPwPPvigJCQk2F18/flyPv3000CcGwAAyGZoDwBA6liJCDk24NflADQ5j/MzAAAIPbQHAABwYcDvO2yPIXwAAIQm2gMAAOQcuTLyplOnTslff/3lff7zzz/LmDFjZPHixYE8NwAAkI3RHgAAwIUBf5s2bWT69On285EjR+S2226TkSNH2usTJ04M9DkCAIBsiPYAAAAuDPi/+eYbueOOO+znTz75REqWLGl39bXSHzduXKDPEQAAZEO0BwAAcGHAr8P3ChcubD/rsD3
N0psrVy5p0KCBVfQAAMD9aA8AAOCSpH2+brrpJpk7d6488MADsmjRIundu7e9fujQIVumBwAAuB/tASBzl3EDgKD08MfFxcmLL74o119/vdSvX18aNmzovbt/6623XvVJAQCA7C9Q7QGd71+jRg27SaCbHueLL77w7j99+rT06NFDihcvLoUKFZKYmBg5ePCg3zH27dsnrVu3lgIFCkhUVJT07dtXzp8/H8BvCwBAiPTwP/TQQ9K4cWP57bffpGbNmt7XmzVrZnf5AQCA+wWqPVCmTBl544035OabbxaPxyPTpk2zxH+bNm2SqlWr2siB+fPny+zZs6VIkSLSs2dPmz6watUqe/+FCxcs2NccAqtXr7bz6dChg+TJk0def/31TPnuAADkBGEerVlxWceOHbMGxtGjRxmiCGTy8MUpnepl6bkAOZXb66ZixYrJiBEj7KbCddddJzNmzLCf1ffffy+VK1eWxMREyxegowHuu+8+OXDggJQoUcLKTJo0Sfr37y+///675M2bN02f6fZriuyJIf2hgzYO0isQ9VKGevhPnjxpd+KXLl1q8/QuXrzot//HH3/M0MkAAICcIzPaA9pbrz35emwd2r9x40Y5d+6cNG/e3FumUqVKUq5cOW/Ar4/Vq1f3BvsqOjpaunfvLtu2bUt1esGZM2ds821YAQDgJhkK+Dt37iwrVqyQ9u3bS6lSpSQsLCzwZwYAALK1QLYHtmzZYgG+ztfXefpz5syRKlWqyObNm62HvmjRon7lNbhPSkqyn/XRN9h39jv7UhMfHy+DBw/O8DkDaUUvPoAcFfDr0DmdS9eoUaPAnxEAAMgRAtkeqFixogX3Omzxk08+kY4dO9rNhMw0YMAA6dOnj18Pf9myZTP1MwEAyPYB/zXXXGNz6wAAQOgKZHtAe/F1mT9Vp04dWb9+vYwdO1YeeeQROXv2rBw5csSvl1+z9GuSPqWP69at8zuek8XfKZOSiIgI2wAAcKsMLcs3dOhQW4rnr7/+CtiJ6BxAHQrYq1evgC/Ds3z5cqldu7ZV6tqYSEhICNh5AwAQqjKjPeDQfAA6v16Df822r3kCHDt37rT631kGUB91SoDmEXAsWbLEEhzptAAAAEJVhnr4R44cKT/88IPNj9O1d7Ui9vXNN9+k63h6F//dd9+1NXh9BWIZnr1791qZbt26yYcffmgNBp1zqHMNNaEPAADImEC1B3RofcuWLS0R3/Hjxy0jv96sX7RokdX/sbGxNvReRxNoEP/ss89akK8J+1SLFi0ssNdcAsOHD7d5+wMHDrROA3rwAQChLEMBf9u2bQN2AidOnJB27drJ5MmTZdiwYd7XdQ7flClTrNJv2rSpvTZ16lRbhmfNmjVWyS9evFi2b98uX375pTU2atWqZb0NugzPoEGDbHigLstToUIFa5Qoff/XX38to0ePJuAHAOAqBKo9oD3zesNeb9xrgK8dABrs33PPPbZf6+xcuXLZSD/t9df6+5133vG+P3fu3DJv3jzLyq83AgoWLGg5AIYMGRKQ8wMAIKQC/ldffTVgJ6B337UHXpfb8Q34A7UMj5bxPYZTxnfqQHIs0wMAQNa1B/QG/+Xky5dPJkyYYFtqypcvLwsWLAjI+QAAENJz+JUmz3n//fdtGN6ff/7pHbr366+/pvkYM2fOtPfosjjJ6XC8QCzDk1oZDeJPnTqV4nnp+WgPg7ORsRcAgMxrDwAAgGzUw//dd99Zr7kGwz/99JN06dLF5tV9+umnlkRn+vTpVzzG/v375fnnn7ekOnrnPjthmR4AALKmPQAAALJZD78Gw506dZLdu3f7BeutWrWSlStXpukYOmRf5+xp9vzw8HDbdL3dcePG2c/aC+8sw+Mr+TI8ybP2J1+GJ7UymvQnf/78KZ6bJvjR/b4bAAAIfHsAAABks4Bfs+o//fTTl7z+t7/9zTuU/kqaNWtmS+hs3rzZu9WtW9cS+Dk/B2IZHi3jewynjHMMAACQMYF
oDwAAgMyToSH92gOeUiK7Xbt2yXXXXZemYxQuXFiqVavm95pm1S1evLj39UAsw6PL8b399tvSr18/eeqpp2TZsmUya9YsW+4PAABkXCDaAwAAIJv18P/973+3pW40i74KCwuznnddDk+XzAkUXYbnvvvus2PeeeedNjxf5wUmX4ZHH/VGwBNPPGHL+vguw6NL8mlwr736NWvWtOX5NLkQS/IBAHB1sqo9AAAAMibM4/F40vumo0ePykMPPWRD+U6cOCGlS5e23nUNunVJHO2pdxPtvdCERPq9mc8PXL3YhPWp7pvSqV6WnguQU2WHuslt7YHscE0RevUeQgdtHASjXsrQkH79UO0xX7VqlXz77bdWyWvyveTr3QMAAPeiPQAAQPaW7oD/4sWLkpCQYEPrdQkeHb6nw+Z1uL0OFtDnAADA3WgPAADgsjn8WoHrfL3OnTvLr7/+KtWrV5eqVavKzz//bMvyPPDAA5l3pgAAIFugPQAAgAt7+PVOvq6rq8vcNWnSxG+fZr9v27atTJ8+3RLnAQAAd6I9AACAC3v4P/roI3n55ZcvqdxV06ZN5aWXXpIPP/wwkOcHAACyGdoDAAC4MOD/7rvv5N577011f8uWLS1pDwAAcC/aAwAAuDDg//PPP6VEiRKp7td9hw8fDsR5AQCAbIr2AAAALgz4L1y4IOHhqU/7z507t5w/fz4Q5wUAALIp2gMAALgwaZ9m5dXsuxERESnuP3PmTKDOCwAAZFO0BwAg/WIT1l92/5RO9bLsXBA60hXwd+zY8YplyMgLAIC70R4AAMCFAf/UqVMz70wAAECOQHsAAAAXzuEHAAAAAAA5AwE/AAAAAAAuRMAPAAAAAIALEfADAAAAAOBCBPwAAAAAALgQAT8AAAAAAC5EwA8AAAAAgAsR8AMAAAAA4EIE/AAAAAAAuFBQA/6JEydKjRo1JDIy0raGDRvKF1984d1/+vRp6dGjhxQvXlwKFSokMTExcvDgQb9j7Nu3T1q3bi0FChSQqKgo6du3r5w/f96vzPLly6V27doSEREhN910kyQkJGTZdwQAAAAAIOQC/jJlysgbb7whGzdulA0bNkjTpk2lTZs2sm3bNtvfu3dv+fzzz2X27NmyYsUKOXDggDz44IPe91+4cMGC/bNnz8rq1atl2rRpFszHxcV5y+zdu9fKNGnSRDZv3iy9evWSzp07y6JFi4LynQEAAAAAyAphHo/HI9lIsWLFZMSIEfLQQw/JddddJzNmzLCf1ffffy+VK1eWxMREadCggY0GuO++++xGQIkSJazMpEmTpH///vL7779L3rx57ef58+fL1q1bvZ/x6KOPypEjR2ThwoVpOqdjx45JkSJF5OjRozYSAcDViU1Yn+q+KZ3qZem5ADkVdVPgcU2RGfUakFa0gZAZ9VK2mcOvvfUzZ86UkydP2tB+7fU/d+6cNG/e3FumUqVKUq5cOQv4lT5Wr17dG+yr6OhouzDOKAEt43sMp4xzjJScOXPGjuG7AQAAAACQkwQ94N+yZYvNz9f59d26dZM5c+ZIlSpVJCkpyXroixYt6ldeg3vdp/TRN9h39jv7LldGg/hTp06leE7x8fF2J8XZypYtG9DvDAAAAACA6wP+ihUr2tz6tWvXSvfu3aVjx46yffv2oJ7TgAEDbNiEs+3fvz+o5wMAAAAAQI4L+LUXXzPn16lTx3rWa9asKWPHjpWSJUtaMj6da+9Ls/TrPqWPybP2O8+vVEbnQOTPnz/Fc9LRBs7KAc4GAAAyh9b/9erVk8KFC9uKO23btpWdO3f6lQnUyj0AAISSoAf8yV28eNHm0OsNgDx58sjSpUu9+7Ty18pc5/grfdQpAYcOHfKWWbJkiQXoOi3AKeN7DKeMcwwAABBcuhKPBvNr1qyxOlpz+LRo0cLy+jgCsXIPAAChJjzYQ+dbtmxpifiOHz9uGfmXL19uS+bp3PnY2Fjp06ePZe7XIP7ZZ5+1QF0z9CttDGhg375
9exk+fLjN1x84cKA1GrSXXmlegLffflv69esnTz31lCxbtkxmzZplmfsBAEDwJV81RwN17aHXBL533nmnTa+bMmWKtRN0CV81depUW7lHbxJou2Dx4sU2JfDLL7+0XD21atWSoUOH2mo9gwYNshGFQEaRhR9AThXUHn7tme/QoYPN42/WrJmsX7/egv177rnH9o8ePdqW3dNhe1rh6/D8Tz/91Pv+3Llzy7x58+xRbwQ88cQTdrwhQ4Z4y1SoUMGCe+0x0OkCI0eOlPfff98y9QMAgOxHA3ylN/xVoFbuSY5VeQAAbhfUHn69W385+fLlkwkTJtiWmvLly8uCBQsue5y7775bNm3alOHzBAAAWTe1r1evXtKoUSOpVq2avRaolXtSyh0wePDgTPomABDYkSRTOtXLsnOBe2S7OfwAACB06bS8rVu3ysyZMzP9s1iVBwDgdkHt4QcAAHD07NnTpuqtXLlSypQp433dd+Ue317+5Cv3rFu37rIr9ySn+X6cnD8AALgRPfwAACCoPB6PBftz5syx5Lqaf8dXoFbuAQAg1NDDDwAAgj6MXzPw//vf/5bChQt759zrij358+cP2Mo9AACEGgJ+AAAQVBMnTvQm2fWlS+916tTJu3JPrly5bOUeza6vGfjfeeedS1bu6d69u90IKFiwoHTs2NFv5R4AAEINAT8AAAj6kP4rCdTKPQAAhBLm8AMAAAAA4EIE/AAAAAAAuBABPwAAAAAALkTADwAAAACACxHwAwAAAADgQgT8AAAAAAC4EAE/AAAAAAAuRMAPAAAAAIALEfADAAAAAOBC4cE+AQAAACCYYhPWB/sUACBTEPADAAAAQA6+MTWlU70sPRfkHAzpBwAAAADAhQj4AQAAAABwIQJ+AAAAAABcKKgBf3x8vNSrV08KFy4sUVFR0rZtW9m5c6dfmdOnT0uPHj2kePHiUqhQIYmJiZGDBw/6ldm3b5+0bt1aChQoYMfp27evnD9/3q/M8uXLpXbt2hIRESE33XSTJCQkZMl3BAAAAAAg5AL+FStWWDC/Zs0aWbJkiZw7d05atGghJ0+e9Jbp3bu3fP755zJ79mwrf+DAAXnwwQe9+y9cuGDB/tmzZ2X16tUybdo0C+bj4uK8Zfbu3WtlmjRpIps3b5ZevXpJ586dZdGiRVn+nQEAAAAAcH2W/oULF/o910Bde+g3btwod955pxw9elSmTJkiM2bMkKZNm1qZqVOnSuXKle0mQYMGDWTx4sWyfft2+fLLL6VEiRJSq1YtGTp0qPTv318GDRokefPmlUmTJkmFChVk5MiRdgx9/9dffy2jR4+W6OjooHx3AAAAAABCZg6/BviqWLFi9qiBv/b6N2/e3FumUqVKUq5cOUlMTLTn+li9enUL9h0axB87dky2bdvmLeN7DKeMc4zkzpw5Y+/33QAAAAAAyEmyTcB/8eJFG2rfqFEjqVatmr2WlJRkPfRFixb1K6vBve5zyvgG+85+Z9/lymggf+rUqRRzCxQpUsS7lS1bNsDfFgAAAAAAFw/p96Vz+bdu3WpD7YNtwIAB0qdPH+9zvTFA0A8AAJBzxSasD/YpAEBoBvw9e/aUefPmycqVK6VMmTLe10uWLGnJ+I4cOeLXy69Z+nWfU2bdunV+x3Oy+PuWSZ7ZX59HRkZK/vz5LzkfzeSvGwAAAAAAOVVQh/R7PB4L9ufMmSPLli2zxHq+6tSpI3ny5JGlS5d6X9Nl+3QZvoYNG9pzfdyyZYscOnTIW0Yz/mswX6VKFW8Z32M4ZZxjAAAAAADgNuHBHsavGfj//e9/S+HChb1z7nXevPa862NsbKwNr9dEfhrEP/vssxaoa4Z+pcv4aWDfvn17GT58uB1j4MCBdmynl75bt27y9ttvS79+/eSpp56ymwuzZs2S+fPnB/PrAwAAAADgzh7+iRMnWmb+u+++W0qVKuXdPv74Y28ZXTrvvvvuk5iYGFuqT4fnf/rpp979uXPntukA+qg
3Ap544gnp0KGDDBkyxFtGRw5ocK+9+jVr1rTl+d5//32W5AMAAAAAuFZ4sIf0X0m+fPlkwoQJtqWmfPnysmDBgsseR28qbNq0KUPnCQAAAABATpNtluUDAAAAAACBQ8APAAAAAIALEfADAAAAAOBCBPwAAAAAALgQAT8AAAAAAC5EwA8AAAAAgAsR8AMAAAAA4EIE/AAAAAAAuBABPwAACKqVK1fK/fffL6VLl5awsDCZO3eu336PxyNxcXFSqlQpyZ8/vzRv3lx2797tV+bPP/+Udu3aSWRkpBQtWlRiY2PlxIkTWfxNACB7ik1Yf9kN7kXADwAAgurkyZNSs2ZNmTBhQor7hw8fLuPGjZNJkybJ2rVrpWDBghIdHS2nT5/2ltFgf9u2bbJkyRKZN2+e3UTo2rVrFn4LAACyn/BgnwAAAAhtLVu2tC0l2rs/ZswYGThwoLRp08Zemz59upQoUcJGAjz66KOyY8cOWbhwoaxfv17q1q1rZcaPHy+tWrWSt956y0YOAAAQiujhBwAA2dbevXslKSnJhvE7ihQpIvXr15fExER7ro86jN8J9pWWz5Url40ISM2ZM2fk2LFjfhsAAG5CDz8AAMi2NNhX2qPvS587+/QxKirKb394eLgUK1bMWyYl8fHxMnjw4Ew5bwDISszDR2ro4QcAACFpwIABcvToUe+2f//+YJ8SAAABRcAPAACyrZIlS9rjwYMH/V7X584+fTx06JDf/vPnz1vmfqdMSiIiIiyrv+8GAICbMKQfAABkWxUqVLCgfenSpVKrVi17Tefa69z87t272/OGDRvKkSNHZOPGjVKnTh17bdmyZXLx4kWb64/QwJBmALgUAT8AAAiqEydOyJ49e/wS9W3evNnm4JcrV0569eolw4YNk5tvvtluALzyyiuWeb9t27ZWvnLlynLvvfdKly5dbOm+c+fOSc+ePS2DPxn6AQChjIAfAAAE1YYNG6RJkybe53369LHHjh07SkJCgvTr109OnjwpXbt2tZ78xo0b2zJ8+fLl877nww8/tCC/WbNmlp0/JiZGxo0bF5TvAwBuGiEzpVO9LD0XBBYBPwAACKq7775bPB5PqvvDwsJkyJAhtqVGRwPMmDEjk84QAICciaR9AAAAAAC4EAE/AAAAAAAuFNSAf+XKlXL//fdbQh0drjd37ly//Tq8Ly4uTkqVKiX58+eX5s2by+7du/3K6JI77dq1s6V0ihYtKrGxsZb8x9d3330nd9xxh831K1u2rAwfPjxLvh8AAAAAACEZ8GsCnpo1a8qECRNS3K+BuSbc0Yy7uvxOwYIFJTo6Wk6fPu0to8H+tm3bZMmSJTJv3jy7iaBJfRy6dE+LFi2kfPnytlzPiBEjZNCgQfLee+9lyXcEAAAAACDkkva1bNnStpRo7/6YMWNk4MCB0qZNG3tt+vTpUqJECRsJoEvt7Nixw7L0rl+/XurWrWtlxo8fL61atZK33nrLRg5o1t6zZ8/KBx98IHnz5pWqVavaUj+jRo3yuzHg68yZM7b53jQAAAAAACAnybZZ+nUN3qSkJBvG7yhSpIjUr19fEhMTLeDXRx3G7wT7Ssvrcjw6IuCBBx6wMnfeeacF+w4dJfDmm2/K4cOH5Zprrrnks+Pj42Xw4MFZ8C0BAABwtcuGAQByWNI+DfaV9uj70ufOPn2Miory2x8eHm5L8/iWSekYvp+R3IABA+To0aPebf/+/QH8ZgAAAAAAhHAPfzBFRETYBgAAAABATpVtA/6SJUva48GDBy1Lv0Of16pVy1vm0KFDfu87f/68Ze533q+P+h5fznOnDAAAAIKPYfsAECJD+itUqGAB+dKlS/2S5+nc/IYNG9pzfTxy5Ihl33csW7ZMLl68aHP9nTKauf/cuXPeMprRv2LFiinO3wcAAAAAwA2C2sN/4sQJ2bNnj1+iPs2gr3Pwy5UrJ7169ZJhw4bJzTffbDcAXnnlFcu837ZtWytfuXJluffee6VLly6
2dJ8G9T179rSEflpOPf7445aALzY2Vvr37y9bt26VsWPHyujRoyW73sGe0qlelp4LACDreij5Nx4AAIREwL9hwwZp0qSJ93mfPn3ssWPHjpKQkCD9+vWTkydP2vJ52pPfuHFjW4YvX7583vfosnsa5Ddr1syy88fExMi4ceP8MvsvXrxYevToIXXq1JFrr71W4uLiUl2SDwAAAAAANwhqwH/33XeLx+NJdX9YWJgMGTLEttToaIAZM2Zc9nNq1Kgh//nPf67qXAEAAAAAyEmybdI+AAAAuAtJ+QAga2XbpH0AAAAAACDjCPgBAAAAAHAhAn4AAAAAAFyIOfwAAAAAgBSx3GzORg8/AAAAAAAuRMAPAAAAAIALMaQfAAAAAJAhDPnP3ujhBwAAAADAhQj4AQAAAABwIQJ+AAAAAABciDn8AAAAyJK5vACArEUPPwAAAAAALkQPPwAAANKMXnwAgfo3gwz+mY+AHwAAAACQ5VjSL/MxpB8AAAAAABci4AcAAAAAwIUI+AEAAAAAcCECfgAAAAAAXIikfQAAACGELPsAQuHfKxL+hWDAP2HCBBkxYoQkJSVJzZo1Zfz48XLbbbcF+7QAAECAUNf/H4J6AKGOFQBCLOD/+OOPpU+fPjJp0iSpX7++jBkzRqKjo2Xnzp0SFRUV7NMDAABXKZTqegJ6AEBahEzAP2rUKOnSpYs8+eST9lwbA/Pnz5cPPvhAXnrppWCfHgAAuEpuqusJ6AEg5/47PSUbjR4IiYD/7NmzsnHjRhkwYID3tVy5cknz5s0lMTHxkvJnzpyxzXH06FF7PHbsWODO6dSJVPcF8nOA7Ii/f7jZ5f6+A/k37hzH4/EE5HihVtdnRX3f48ONATkOACDwjl3h3/qr+Tc8O9X1IRHw//e//5ULFy5IiRIl/F7X599///0l5ePj42Xw4MGXvF62bFnJCv96Jks+BsiW+PuH2wX6b/z48eNSpEgRCXXpreuzQ30PAHBnm/Nf2aiuD4mAP720d0DnADouXrwof/75pxQvXlzCwsICcqdGGxP79++XyMjIqz4ekJPw949QFsi/f73brw2A0qVLB+z8Qs3V1vf8e3b1uIZXj2t49biGV49rmDnXMBB1fUgE/Ndee63kzp1bDh486Pe6Pi9ZsuQl5SMiImzzVbRo0YCfl/4i+R8CoYq/f4SyQP3907Of8bo+kPU9/55dPa7h1eMaXj2u4dXjGgb+Gl5tXZ9LQkDevHmlTp06snTpUr+7+Pq8YcOGQT03AABw9ajrAQAI0R5+pUP2OnbsKHXr1rX1eHWpnpMnT3oz+QIAgJyNuh4AgBAN+B955BH5/fffJS4uTpKSkqRWrVqycOHCS5L7ZAUdPvjqq69eMowQCAX8/SOU8ffvrrqe3+fV4xpePa7h1eMaXj2uYfa9hmEe1vMBAAAAAMB1QmIOPwAAAAAAoYaAHwAAAAAAFyLgBwAAAADAhQj4s8jdd98tvXr1CvZpAAAAAABCBAE/AABAKiZMmCDXX3+95MuXT+rXry/r1q27bPnZs2dLpUqVrHz16tVlwYIFEurScw0nT54sd9xxh1xzzTW2NW/e/IrXPBSk9+/QMXPmTAkLC5O2bdtKqEvvNTxy5Ij06NFDSpUqZVnTb7nllpD//zm911CXRq1YsaLkz59fypYtK71795bTp09LqFq5cqXcf//9Urp0afv/cu7cuVd8z/Lly6V27dr2N3jTTTdJQkJCuj+XgB8AACAFH3/8sfTp08eWSfrmm2+kZs2aEh0dLYcOHUqx/OrVq+Wxxx6T2NhY2bRpkwVZum3dulVCVXqvoTZu9Rp+9dVXkpiYaEFCixYt5Ndff5VQld5r6Pjpp5/kxRdftBsooS691/Ds2bNyzz332DX85JNPZOfOnXYz6m9/+5uEqvRewxkzZshLL71k5Xfs2CFTpkyxY7z88ssSqk6ePGnXTW+cpMXevXuldevW0qRJE9m8ebONFu/cubMsWrQofR+
sy/Ih8911112eHj162BYZGekpXry4Z+DAgZ6LFy8G+9SALPHFF194GjVq5ClSpIinWLFintatW3v27NkT7NMCssyFCxc8b775pufGG2/05M2b11O2bFnPsGHDgn1auIzbbrvN6m3f32Hp0qU98fHxKZb/xz/+Yf+2+apfv77n6aef9oSq9F7D5M6fP+8pXLiwZ9q0aZ5QlZFrqNft9ttv97z//vuejh07etq0aeMJZem9hhMnTvTccMMNnrNnz2bhWbrrGmrZpk2b+r3Wp08fawvC49EwfM6cOZct069fP0/VqlX9XnvkkUc80dHR6foseviz0LRp0yQ8PNyGv4wdO1ZGjRol77//frBPC8iyu5p6Z3jDhg2ydOlSyZUrlzzwwANy8eLFYJ8akCUGDBggb7zxhrzyyiuyfft26/0oUaJEsE8LqdAevo0bN9qQcof+u6XPtec5Jfq6b3mlPWCplXe7jFzD5P766y85d+6cFCtWTEJRRq/hkCFDJCoqykabhLqMXMPPPvtMGjZsaEP69d/patWqyeuvvy4XLlyQUJSRa3j77bfbe5xh/z/++KNNiWjVqlWWnXdOlxigOiU8wOeFy9BhaaNHj7Y5GzqfZcuWLfa8S5cuwT41INPFxMT4Pf/ggw/kuuuus8BHK1LAzY4fP243et9++23p2LGjvXbjjTdK48aNg31qSMV///tfa9wnvymjz7///vsU35OUlJRieX09FGXkGibXv39/m++avNEbKjJyDb/++msbPq1DgJGxa6jB6bJly6Rdu3YWpO7Zs0eeeeYZu/mkQ9RDTUau4eOPP27v03pOO7TPnz8v3bp1C+kh/emVWp1y7NgxOXXqlOVGSAt6+LNQgwYNLNh36J3D3bt3h+zdQoQW/VvXeZk33HCDREZGWtIXtW/fvmCfGpDpdP7imTNnpFmzZsE+FSDH0BExmnRuzpw5liQMabu52L59e5tvfu211wb7dHIsHX2oIyTee+89qVOnjjzyyCPyz3/+UyZNmhTsU8sxNB+Hjop45513bM7/p59+KvPnz5ehQ4cG+9RCDj38ALKEZiUtX768NUK0t0YrU+3Z12FigNul9S48sg8NlnLnzi0HDx70e12flyxZMsX36OvpKe92GbmGjrfeessC/i+//FJq1KghoSq91/CHH36wRHNa5zqcqXM6rVSTz+noolCSkb9DzcyfJ08ee5+jcuXK1uOq7Za8efNKKMnINdTpa3rzSZPMKV21RKd3du3a1W6e6JQAXF5qdYp2nKWnXcGVzkJr1671e75mzRq5+eab/f4xAdzojz/+sEbGwIEDrYdTK83Dhw8H+7SALKP/1mvlrPkrkDNog1579nx/Zxo46XMdoZcSfT3573jJkiWplne7jFxDNXz4cOsFXLhwodStW1dCWXqvoS4JqVNGdTi/s/3973/3ZvnW6aWhJiN/h40aNbJh/L55hnbt2mU3AkIt2M/oNdT8G8mDeifm+b+cdbiSgNUpGUoriAxl6S9UqJCnd+/enu+//94zY8YMT8GCBT2TJk0K9qkBmU4zuerKFE888YRn9+7dnqVLl3rq1auXpgylgFsMGjTIc80111i2cV2hIjEx0TJoI/uaOXOmJyIiwpOQkODZvn27p2vXrp6iRYt6kpKSbH/79u09L730krf8qlWrPOHh4Z633nrLs2PHDs+rr77qyZMnj2fLli2eUJXea/jGG2/YKhaffPKJ57fffvNux48f94Sq9F7D5MjSn/5ruG/fPlsdomfPnp6dO3d65s2b54mKigrplVXSew313z+9hh999JHnxx9/9CxevNhWqdHVTELV8ePHPZs2bbJN28CjRo2yn3/++Wfbr9dPr6NDr1uBAgU8ffv2tTplwoQJnty5c3sWLlyYrs8l4M/CgP+ZZ57xdOvWzZbl00bfyy+/zLJ8CBlLlizxVK5c2SqLGjVqeJYvX07Aj5C78aWNxfLly1sQWK5cOc/rr78e7NPCFYwfP95
+VxqE6rJUa9as8avbNZjyNWvWLM8tt9xi5XU5pfnz53tCXXquof7/oXVD8k2Dh1CW3r9DXwT8GbuGq1evtmU1td2iS/S99tprttxhKEvPNTx37pzd6NYgP1++fLYUrcZChw8f9oSqr776KsV/35zrpo96HZO/p1atWnbN9e9w6tSp6f7cMP1P+sYEAAAAAACA7I45/AAAAAAAuBABPwAAAAAALkTADwAAAACACxHwAwAAAADgQgT8AAAAAAC4EAE/AAAAAAAuRMAPAAAAAIALEfADAAAAAOBCBPwAAiIhIUGKFi0asOMtX75cwsLC5MiRIwE7JgAAbuDxeKRr165SrFgxqyu1/u3Vq1ewTwtANkTAD+RAgwYNklq1akl28sgjj8iuXbuCfRoAALjewoUL7Ub7vHnz5LfffpNq1aoF/DPuvvtubiIALhAe7BMAkPOdO3dO8ufPb5ubnD17VvLmzRvs0wAAwM8PP/wgpUqVkttvv92eh4e7t0lPXQxcHXr4gSC5ePGiDB8+XG666SaJiIiQcuXKyWuvvWb7+vfvL7fccosUKFBAbrjhBnnllVcsqFZ6R3/w4MHy7bff2jA+3fQ1pcPfO3fuLNddd51ERkZK06ZNrZyvYcOGSVRUlBQuXNjKvvTSS36jBfS8hgwZImXKlLHz0n3ak+D46aef7DM//vhjueuuuyRfvnzy4Ycfpjik//PPP5d69epZmWuvvVYeeOAB777/+Z//kbp169p5lCxZUh5//HE5dOhQhq6l89lz586Vm2++2T4vOjpa9u/f79c4atOmjZQoUUIKFSpk5/Xll1/6Hef666+XoUOHSocOHez66XDJK/0+fEdcfPDBB/Z71OM/88wzcuHCBfsd6/fTa+78fgEAyKhOnTrJs88+K/v27bP6WOuu5A4fPmx12TXXXGN1V8uWLWX37t3e/X/88Yc89thj8re//c32V69eXT766CO/z1ixYoWMHTvW29bQ+j8tU/Hmz58vNWrUsLq4QYMGsnXr1jR/rjOyoGfPnja6QNsOWp+rUaNGWfmCBQtK2bJlrZ49ceLEJW0BHfVQsWJFO/5DDz0kf/31l0ybNs2uk16P5557zupnIFQQ8ANBMmDAAHnjjTcseNy+fbvMmDHDglGlQbBWXPq6VraTJ0+W0aNHe4fOv/DCC1K1alUbxqebvqYefvhhC5q/+OIL2bhxo9SuXVuaNWsmf/75p+3XwFyDzjfffNP2a3A6ceJEv/PSzxs5cqS89dZb8t1331lF+/e//92voaD0RsHzzz8vO3bs8FbGvrTC1wC/VatWsmnTJlm6dKncdttt3v0aMGtwrTckNFDXhoQ2MDJKK3T9btOnT5dVq1bZzY9HH33Uu18bBXoueh56Pvfee6/cf//91mDypd+7Zs2aVkZ/N1f6ffjeUNDrrjdHtPEyZcoUad26tfzyyy/WaNJrPnDgQFm7dm2GvyMAAFoPOTfmtQ2wfv36S8pofbphwwb57LPPJDEx0eb8ax3o3Kw+ffq01KlTx+pqDcj1Bnf79u1l3bp13s9o2LChdOnSxdvW0CA7Lfr27WvtCD0v7YDQujatn+vQAF179bU+nzRpkr2WK1cuGTdunGzbts32L1u2TPr163dJW0DLzJw50+pjvQmhbZEFCxbYpp0N7777rnzyyScZvPpADuQBkOWOHTvmiYiI8EyePDlN5UeMGOGpU6eO9/mrr77qqVmzpl+Z//znP57IyEjP6dOn/V6/8cYbPe+++679XL9+fU+PHj389jdq1MjvWKVLl/a89tprfmXq1avneeaZZ+znvXv3evSfjjFjxviVmTp1qqdIkSLe5w0bNvS0a9fOk1br16+34x4/ftyef/XVV/b88OHDV3yvfraWXbNmjfe1HTt22Gtr165N9X1Vq1b1jB8/3vu8fPnynrZt22bo91GgQAH7vTqio6M9119/vefChQve1ypWrOiJj4+/4vEBALic0aNHW53luOuuuzzPP/+
8/bxr1y6r/1atWuXd/9///teTP39+z6xZs1I9ZuvWrT0vvPBCisdMC6fenjlzpve1P/74wz73448/Ttfn3nrrrVf8vNmzZ3uKFy9+SVtgz5493teefvppq5+dtoVTP+vrQKhw74QfIBvTXvEzZ85Y73tKdLi83qHWXmPtmT5//rwNMb8c7SnXssWLF/d7/dSpU3YctXPnThsC50t73fUuuTp27JgcOHBAGjVq5FdGnyefGqDD8S9n8+bN1jOQGh1hoEPh9bg69FCnEijtca9SpYqkl85f1GH6jkqVKtnQPr3W+h312ujnaa+C9lToNdVrk7yHP6XvlZbfhw4V1JEADh2tkTt3buuR8H0to9MWAABIC633tE6sX7++9zVtG+gwd92ndEj766+/LrNmzZJff/3V5slru0SHwV8tHRng0FUEMvK5OgogOZ2GFx8fL99//721V7Qu1hED2qvvvF8fb7zxRr96V+tnnWrn+xp1MUIJAT8QBJdLbqdD79q1a2fz9HWofJEiRWxomg6PuxwNRDWBjw5fSy6Qy+U5dA5dRr/jyZMn7bvpptMMdMifBt76XCv/zPDiiy/KkiVLbMi+5k3Q89O5fck/L/n3SuvvI0+ePH7PdR5jSq85NzYAAAiWESNG2LD9MWPGeOfF65z5zKqD0/u5yetinfZ33333Sffu3W36nt5I+PrrryU2Ntbe6wT81MXApQj4gSDQxHIacOp8ck2c52v16tVSvnx5+ec//+l97eeff/Yro/Pakiec0fn6SUlJdlc/pQQ+Su+y65w6TeTj8J37p73WpUuXtjlzmpDPoc9959+nhSbs0e/35JNPXrJP785r4h7NYeDMCdS5hldD7/TrMZzz1NEMOo+/cuXK3u+gcxqdxIF6g+RKCYjS+vsAACC70HpP60TNGeNk8dc6V+tFZwSd1omayPaJJ56w5xoA69K6viPsUmprpMWaNWssR5DSEXx6XN+6+Eqfm9qoQC2rN9udkXM6SgDAlRHwA0GgmWs187smm9EKVYfM//7775aIRm8GaG+39iLrEHUdgj5nzhy/92tAv3fvXhs2r0l7dCh58+bNbRhd27ZtLTO8ZpXX4flO8jwdqq5ZfXWYvf6sjQAdqq6J+TTzvG+ynVdffdWGxGnm+alTp9rnaE98eugxdMqCHkeT52njQxPm6PfWhoB+7/Hjx0u3bt0scY8m8Lsaegdfv58OvdebHprhV7MDOzcA9Lp++umnljxI7+5rQr603OFPy+8DAIDsQustDaq1vtcEddpG0ES7mhlfX3fKaOI6vamtmes1A/7Bgwf9Am9ta+hNA705rkPitVfdd5paajShoE4h0KHzerNcM+1r2yStn5sSHZmnif+03aD1uG8yPwCXR5Z+IEg04NRs+3FxcXbnWzPt65wyzYjfu3dvC1g14NZK0ckW74iJibEs802aNLHh8JoVXoNYDajvvPNO61XXgF8Dbe2NdrL/69B0XR1Ah7friAC9aaC93noDwqHL1fTp08fOTYfbaZZbzfKrlXR66LI6s2fPtvfq99AlAp0svHrOmvVe92slrz39OtT+auhwPr2ZoMv76Q0UbZzoDQ2HNiq0caE3OrSxoMPz9RpcSVp+HwAAZCd6s17nwesweO0M0Cz92kZwhrfrqjFaB2pdqPW1Lh/rBOUObStoLhqtp52pd2mhdbqu4qOfryMPdYlevcmf1s9Nia6eo/W4rnhTrVo164TQ+fwArixMM/eloRwAl7rnnnuswtWlanIqvXmgcwB1CD8AAMh6mkNIOyJ0GH9m5A4CkDEM6QdCiGay1SFwemdd79rryADNeqvJ7AAAAAC4C0P6gRDiO+xfh9rpMLv//d//tfn/2VnLli1tiH5Kmy7vAwAAMpfm3EmtLtZ9ALInhvQDyPZ0rd5Tp06luE+TCOkGAAAyj+YZOnbsWIr7dJWfqKioLD8nAFdGwA8AAAAAgAsxpB8AAAAAABci4AcAAAAAwIUI+AEAAAA
AcCECfgAAAAAAXIiAHwAAAAAAFyLgBwAAAADAhQj4AQAAAAAQ9/l/RrxKrK6AP2UAAAAASUVORK5CYII=", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" + "name": "stdout", + "output_type": "stream", + "text": [ + "Original pipeline:\n", + "PipelineSpace SimpleSpace with parameters:\n", + "\tint_param1 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.LOW)\n", + "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", + "\n", + "==================================================\n", + "After adding new float:\n", + "PipelineSpace SimpleSpace with parameters:\n", + "\tint_param1 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.LOW)\n", + "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", + "\tparam_4 = Float(0.0, 1.0)\n", + "\n", + "==================================================\n", + "After removing 'int_param1':\n", + "PipelineSpace SimpleSpace with parameters:\n", + "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", + "\tparam_4 = Float(0.0, 1.0)\n", + "\n", + "==================================================\n", + "After adding 'int_param1' twice and once with different upper:\n", + "Error occurred: A different parameter with the name 'int_param1' already exists in the pipeline:\n", + " Float(0.0, 1.0)\n", + " Float(0.0, 2.0)\n", + "PipelineSpace SimpleSpace with parameters:\n", + "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", + "\tparam_4 = Float(0.0, 1.0)\n", + "\tint_param1 = Float(0.0, 1.0)\n", + "\n", + "==================================================\n", + "After removing 'int_param1':\n", + "PipelineSpace SimpleSpace with 
parameters:\n", + "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", + "\tparam_4 = Float(0.0, 1.0)\n" + ] } ], "source": [ - "\"\"\"\n", - "This example demonstrates the full capabilities of NePS Spaces\n", - "by defining a neural network architecture using PyTorch modules.\n", - "It showcases how to interact with the NePS Spaces API to create,\n", - "sample and evaluate a neural network pipeline.\n", - "It also demonstrates how to convert the pipeline to a callable\n", - "and how to run NePS with the defined pipeline and space.\n", - "\"\"\"\n", - "\n", "import numpy as np\n", "import torch\n", "import torch.nn as nn\n", "import neps\n", "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", - "from neps.space.neps_spaces import sampling\n", - "from neps.space.neps_spaces import neps_space\n", "\n", "# Define the NEPS space for the neural network architecture\n", "class SimpleSpace(PipelineSpace):\n", " int_param1 = neps.Integer(1,100, prior=50, prior_confidence=\"low\")\n", " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\")\n", " int_param3 = neps.Integer(1,100, prior=50, prior_confidence=\"high\")\n", - " int_param4 = neps.Integer(1,4, prior=1.5, prior_confidence=\"low\")\n", - " categorical_param = Categorical((\"a\", \"b\", \"c\"), prior=0, prior_confidence=\"high\")\n", - " float_param = neps.Float(0, 1.0, prior=0.5, prior_confidence=\"high\")\n", "\n", - "# Sampling and printing one random configuration of the pipeline\n", + "class OtherSpace(PipelineSpace):\n", + " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\", log=False)\n", + "\n", + "# Test operations\n", "pipeline = SimpleSpace()\n", - "random_sampler = sampling.RandomSampler({})\n", - "sampler = sampling.PriorOrFallbackSampler(fallback_sampler=random_sampler, always_use_prior=False)\n", + 
"print(\"Original pipeline:\")\n", + "print(pipeline)\n", "\n", - "values = {\"int_param1\": [],\n", - " \"int_param2\": [], \"int_param3\": [], \"int_param4\": [], \n", - " \"categorical_param\": [], \"float_param\": []}\n", - "for i in range(10000):\n", - " resolved_pipeline, resolution_context = neps_space.resolve(pipeline,domain_sampler=sampler)\n", + "print(\"\\n\" + \"=\"*50)\n", + "print(\"After adding new float:\")\n", + "pipeline=pipeline+neps.Float(0.0, 1.0) \n", + "print(pipeline)\n", "\n", - " # s = resolved_pipeline.int_param1\n", - " # print(resolved_pipeline.get_attrs())\n", - " values[\"int_param1\"].append(resolved_pipeline.int_param1)\n", - " values[\"int_param2\"].append(resolved_pipeline.int_param2)\n", - " values[\"int_param3\"].append(resolved_pipeline.int_param3)\n", - " values[\"int_param4\"].append(resolved_pipeline.int_param4)\n", - " values[\"categorical_param\"].append(resolved_pipeline.categorical_param)\n", - " values[\"float_param\"].append(resolved_pipeline.float_param)\n", + "print(\"\\n\" + \"=\"*50)\n", + "print(\"After removing 'int_param1':\")\n", + "pipeline=pipeline.remove(\"int_param1\") \n", + "print(pipeline)\n", + "\n", + "print(\"\\n\" + \"=\"*50)\n", + "print(\"After adding 'int_param1' twice and once with different upper:\")\n", + "pipeline=pipeline.add(neps.Float(0.0, 1.0), \"int_param1\")\n", + "pipeline=pipeline.add(neps.Float(0.0, 1.0), \"int_param1\")\n", + "try:\n", + " pipeline=pipeline.add(neps.Float(0.0, 2.0), \"int_param1\")\n", + "except ValueError as e:\n", + " print(f\"Error occurred: {e}\")\n", + "print(pipeline)\n", + "\n", + "print(\"\\n\" + \"=\"*50)\n", + "print(\"After removing 'int_param1':\")\n", + "pipeline=pipeline.remove(\"int_param1\")\n", + "print(pipeline)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "efd7be91", + "metadata": {}, + "outputs": [], + "source": [ + "# Test the add method as well\n", + "print(\"Testing add method...\")\n", + "result_add = 
fresh_pipeline.add(neps.Float(0, 1), \"new_float\")\n", + "print(f\"Add returned same object? {result_add is fresh_pipeline}\")\n", + "\n", + "print(\"\\nAfter adding new_float:\")\n", + "print(fresh_pipeline)\n", "\n", - "# Plot the distribution of the sampled values, each in a separate subplot\n", - "import matplotlib.pyplot as plt\n", - "_, axs = plt.subplots(3, 2, figsize=(12, 12))\n", - "axs = axs.flatten()\n", - "for i, (param_name, param_values) in enumerate(values.items()):\n", - " axs[i].hist(param_values,align='mid',bins=50,alpha=0.7)\n", - " axs[i].set_title(f'Distribution of {param_name}')\n", - " axs[i].set_xlabel(param_name)\n", - " axs[i].set_ylabel('Density')\n", - "plt.show()\n" + "# Test method chaining\n", + "print(\"\\nTesting method chaining...\")\n", + "fresh_pipeline.remove(\"param_b\").add(neps.Categorical([\"x\", \"y\", \"z\"]), \"new_cat\")\n", + "print(\"After chaining remove + add:\")\n", + "print(fresh_pipeline)" ] }, { From cfb4b11e9bc6c0e00ef2566d283b7871c3135115 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sat, 11 Oct 2025 18:34:22 +0200 Subject: [PATCH 075/156] Introduce NePS-space compatible Grid Search and Hyperband variants --- neps/api.py | 8 +- neps/optimizers/algorithms.py | 142 +++++++++++-- neps/optimizers/neps_bracket_optimizer.py | 7 +- neps/optimizers/utils/grid.py | 113 +++++++--- neps/runtime.py | 9 +- neps/space/neps_spaces/neps_space.py | 93 ++++----- neps/space/neps_spaces/parameters.py | 49 ++++- neps_examples/basic_usage/algo_tests.ipynb | 219 ++++++++++++++++++++ neps_examples/basic_usage/priors_test.ipynb | 48 ++--- 9 files changed, 549 insertions(+), 139 deletions(-) create mode 100644 neps_examples/basic_usage/algo_tests.ipynb diff --git a/neps/api.py b/neps/api.py index 51e563a29..4b7d4fefc 100644 --- a/neps/api.py +++ b/neps/api.py @@ -13,8 +13,6 @@ from typing import TYPE_CHECKING, Any, Concatenate, Literal import neps -import neps.optimizers.algorithms -import neps.optimizers.neps_bracket_optimizer from 
neps.optimizers import AskFunction, OptimizerChoice, load_optimizer from neps.optimizers.ask_and_tell import AskAndTell from neps.runtime import _launch_runtime, _save_results @@ -438,7 +436,6 @@ def __call__( converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space: pipeline_space = converted_space - space = convert_to_space(pipeline_space) if neps_classic_space_compatibility == "neps" and not isinstance( @@ -473,6 +470,11 @@ def __call__( "moasha", "mo_hyperband", "primo", + "neps_priorband", + "neps_random_search", + "complex_random_search", + "neps_bracket_optimizer", + "neps_hyperband", } is_multi_fidelity = _optimizer_info["name"] in multi_fidelity_optimizers diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index f10cb878e..b12767def 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -171,10 +171,12 @@ def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 *, bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], eta: int, - sampler: Literal["uniform", "prior", "priorband", "mopriorsampler"] - | PriorBandSampler - | MOPriorSampler - | Sampler, + sampler: ( + Literal["uniform", "prior", "priorband", "mopriorsampler"] + | PriorBandSampler + | MOPriorSampler + | Sampler + ), bayesian_optimization_kick_in_point: int | float | None, sample_prior_first: bool | Literal["highest_fidelity"], # NOTE: This is the only argument to get a default, since it @@ -468,7 +470,9 @@ def random_search( pipeline_space: The search space to sample from. use_priors: Whether to use priors when sampling. ignore_fidelity: Whether to ignore fidelity when sampling. - In this case, the max fidelity is always used. + Setting this to "highest fidelity" will always sample at max fidelity. + Setting this to True will randomly sample from the fidelity like any other + parameter. 
""" if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) @@ -535,7 +539,8 @@ def random_search( def grid_search( pipeline_space: SearchSpace | PipelineSpace, *, - ignore_fidelity: bool = False, + ignore_fidelity: bool | Literal["highest fidelity"] = False, + size_per_numerical_dimension: int = 5, ) -> GridSearch: """A simple grid search algorithm which discretizes the search space and evaluates all possible configurations. @@ -543,7 +548,11 @@ def grid_search( Args: pipeline_space: The search space to sample from. ignore_fidelity: Whether to ignore fidelity when sampling. - In this case, the max fidelity is always used. + Setting this to "highest fidelity" will always sample at max fidelity. + Setting this to True will make a grid over the fidelity like any other + parameter. + size_per_numerical_dimension: The number of points to use per numerical + dimension when discretizing the space. """ from neps.optimizers.utils.grid import make_grid @@ -552,23 +561,83 @@ def grid_search( if converted_space is not None: pipeline_space = converted_space else: - raise ValueError( - "This optimizer only supports HPO search spaces, please use a NePS" - " space-compatible optimizer." - ) + return neps_grid_search(pipeline_space, ignore_fidelity=ignore_fidelity, size_per_numerical_dimension=size_per_numerical_dimension) if any( parameter.prior is not None for parameter in pipeline_space.searchables.values() ): - raise ValueError("Grid search does not support priors.") + logger.warning("Grid search does not support priors, they will be ignored.") if ignore_fidelity and pipeline_space.fidelity is None: logger.warning( "Warning: You are using ignore_fidelity, but no fidelity is defined in the" " search space. Consider setting ignore_fidelity to False." ) + if not ignore_fidelity and pipeline_space.fidelity is not None: + raise ValueError( + "Fidelities are not supported for GridSearch natively. 
Consider setting the" + " fidelity to a constant value, or setting ignore_fidelity to True to sample" + " from it like any other parameter or 'highest fidelity' to always sample at" + f" max fidelity. Got fidelity: {pipeline_space.fidelities} " + ) return GridSearch( - configs_list=make_grid(pipeline_space, ignore_fidelity=ignore_fidelity) + configs_list=make_grid(pipeline_space, ignore_fidelity=ignore_fidelity, size_per_numerical_hp=size_per_numerical_dimension) + ) + + +def neps_grid_search( + pipeline_space: PipelineSpace, + *, + ignore_fidelity: bool | Literal["highest fidelity"] = False, + size_per_numerical_dimension: int = 5, +) -> GridSearch: + """A simple grid search algorithm which discretizes the search + space and evaluates all possible configurations. + + Args: + pipeline_space: The search space to sample from. + ignore_fidelity: Whether to ignore fidelity when sampling. + Setting this to "highest fidelity" will always sample at max fidelity. + Setting this to True will make a grid over the fidelity like any other + parameter. + size_per_numerical_dimension: The number of points to use per numerical + dimension when discretizing the space. + """ + from neps.optimizers.utils.grid import make_grid + + if not isinstance(pipeline_space, PipelineSpace): + raise ValueError( + "This optimizer only supports NePS spaces, please use a classic" + " search space-compatible optimizer." 
+ ) + parameters = pipeline_space.get_attrs().values() + non_fid_parameters = [ + parameter + for parameter in parameters + if parameter not in pipeline_space.fidelity_attrs.values() + ] + if any( + parameter.has_prior # type: ignore + for parameter in non_fid_parameters + if isinstance(parameter, Resolvable) + and isinstance(parameter, Integer | Float | Categorical) + ): + logger.warning("Grid search does not support priors, they will be ignored.") + if not pipeline_space.fidelity_attrs and ignore_fidelity: + logger.warning( + "Warning: You are using ignore_fidelity, but no fidelity is defined in the" + " search space. Consider setting ignore_fidelity to False." + ) + if pipeline_space.fidelity_attrs and not ignore_fidelity: + raise ValueError( + "Fidelities are not supported for GridSearch natively. Consider setting the" + " fidelity to a constant value, or setting ignore_fidelity to True to sample" + " from it like any other parameter or 'highest fidelity' to always sample at" + f" max fidelity. 
Got fidelity: {pipeline_space.fidelity_attrs} " + ) + + return GridSearch( + configs_list=make_grid(pipeline_space, ignore_fidelity=ignore_fidelity, size_per_numerical_hp=size_per_numerical_dimension) ) @@ -797,7 +866,7 @@ def hyperband( eta: int = 3, sampler: Literal["uniform", "prior"] = "uniform", sample_prior_first: bool | Literal["highest_fidelity"] = False, -) -> BracketOptimizer: +) -> BracketOptimizer | _NePSBracketOptimizer: """Another bandit-based optimization algorithm that uses a _fidelity_ parameter, very similar to [`successive_halving`][neps.optimizers.algorithms.successive_halving], but hedges a bit more on the safe side, just incase your _fidelity_ parameters @@ -844,12 +913,14 @@ def hyperband( """ if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) - if converted_space is not None: + if converted_space: pipeline_space = converted_space else: - raise ValueError( - "This optimizer only supports HPO search spaces, please use a NePS" - " space-compatible optimizer." + return neps_hyperband( + pipeline_space, + eta=eta, + sampler=sampler, + sample_prior_first=sample_prior_first, ) return _bracket_optimizer( pipeline_space=pipeline_space, @@ -864,6 +935,39 @@ def hyperband( ) +def neps_hyperband( + pipeline_space: PipelineSpace, + *, + eta: int = 3, + sampler: Literal["uniform", "prior"] = "uniform", + sample_prior_first: bool | Literal["highest_fidelity"] = False, +) -> _NePSBracketOptimizer: + """ + Hyperband optimizer for NePS search spaces. + Args: + pipeline_space: The search space to sample from. + eta: The reduction factor used for building brackets + sampler: The type of sampling procedure to use: + + * If `#!python "uniform"`, samples uniformly from the space when + it needs to sample. + * If `#!python "prior"`, samples from the prior + distribution built from the `prior` and `prior_confidence` + values in the search space. 
+ + sample_prior_first: Whether to sample the prior configuration first, + and if so, should it be at the highest fidelity level. + """ + return _neps_bracket_optimizer( + pipeline_space=pipeline_space, + bracket_type="hyperband", + eta=eta, + sampler="prior" if sampler == "prior" else "uniform", + sample_prior_first=sample_prior_first, + early_stopping_rate=None, + ) + + def mo_hyperband( pipeline_space: SearchSpace | PipelineSpace, *, @@ -1716,6 +1820,7 @@ def neps_priorband( neps_random_search, complex_random_search, neps_priorband, + neps_hyperband, ) } @@ -1736,4 +1841,5 @@ def neps_priorband( "neps_random_search", "complex_random_search", "neps_priorband", + "neps_hyperband", ] diff --git a/neps/optimizers/neps_bracket_optimizer.py b/neps/optimizers/neps_bracket_optimizer.py index e184143b4..976b55296 100644 --- a/neps/optimizers/neps_bracket_optimizer.py +++ b/neps/optimizers/neps_bracket_optimizer.py @@ -137,8 +137,13 @@ def __call__( # noqa: C901 if isinstance(self.sampler, NePSPriorBandSampler): config = self.sampler.sample_config(table, rung=rung) elif isinstance(self.sampler, DomainSampler): + environment_values={} + fidelity_attrs = self.space.fidelity_attrs + assert len(fidelity_attrs) == 1, "TODO: [lum]" + for fidelity_name, fidelity_obj in fidelity_attrs.items(): + environment_values[fidelity_name] = self.rung_to_fid[rung] _, resolution_context = neps_space.resolve( - self.space, domain_sampler=self.sampler + self.space, domain_sampler=self.sampler, environment_values=environment_values ) config = neps_space.NepsCompatConverter.to_neps_config( resolution_context diff --git a/neps/optimizers/utils/grid.py b/neps/optimizers/utils/grid.py index aa152c66c..f65300b75 100644 --- a/neps/optimizers/utils/grid.py +++ b/neps/optimizers/utils/grid.py @@ -1,7 +1,7 @@ from __future__ import annotations from itertools import product -from typing import Any +from typing import Any, Literal import torch @@ -14,12 +14,16 @@ SearchSpace, ) +from neps import 
PipelineSpace, Categorical, Float, Integer, Fidelity +from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces.sampling import RandomSampler + def make_grid( - space: SearchSpace, + space: SearchSpace | PipelineSpace, *, size_per_numerical_hp: int = 10, - ignore_fidelity: bool = True, + ignore_fidelity: bool | Literal["highest fidelity"] = False ) -> list[dict[str, Any]]: """Get a grid of configurations from the search space. @@ -39,29 +43,86 @@ def make_grid( A list of configurations from the search space. """ param_ranges: dict[str, list[Any]] = {} - for name, hp in space.items(): - match hp: - case HPOCategorical(): - param_ranges[name] = list(hp.choices) - case HPOConstant(): - param_ranges[name] = [hp.value] - case HPOInteger() | HPOFloat(): - if hp.is_fidelity and ignore_fidelity: - param_ranges[name] = [hp.upper] - continue + if isinstance(space, SearchSpace): + for name, hp in space.items(): + match hp: + case HPOCategorical(): + param_ranges[name] = list(hp.choices) + case HPOConstant(): + param_ranges[name] = [hp.value] + case HPOInteger() | HPOFloat(): + if hp.is_fidelity: + match ignore_fidelity: + case "highest fidelity": + param_ranges[name] = [hp.upper] + continue + case True: + param_ranges[name] = [hp.lower, hp.upper] + case False: + raise ValueError("Grid search does not support fidelity " + "natively. 
Please use the" + "ignore_fidelity parameter.") + if hp.domain.cardinality is None: + steps = size_per_numerical_hp + else: + steps = min(size_per_numerical_hp, hp.domain.cardinality) - if hp.domain.cardinality is None: + xs = torch.linspace(0, 1, steps=steps) + numeric_values = hp.domain.cast(xs, frm=Domain.unit_float()) + uniq_values = torch.unique(numeric_values).tolist() + param_ranges[name] = uniq_values + case _: + raise NotImplementedError(f"Unknown Parameter type: {type(hp)}\n{hp}") + keys = list(space.keys()) + values = product(*param_ranges.values()) + return [dict(zip(keys, p, strict=False)) for p in values] + elif isinstance(space, PipelineSpace): + fid_ranges = {} + for name, hp in space.get_attrs().items(): + match hp: + case Categorical(): + param_ranges[name] = range(len(hp.choices)) + case Fidelity(): + if ignore_fidelity == "highest fidelity": + fid_ranges[name] = [hp.max_value] + continue + elif ignore_fidelity is True: + fid_ranges[name] = [hp.min_value, hp.max_value] + else: + raise ValueError("Grid search does not support fidelity natively." 
\ + " Please use the ignore_fidelity parameter.") + case Integer() | Float(): steps = size_per_numerical_hp - else: - steps = min(size_per_numerical_hp, hp.domain.cardinality) - - xs = torch.linspace(0, 1, steps=steps) - numeric_values = hp.domain.cast(xs, frm=Domain.unit_float()) - uniq_values = torch.unique(numeric_values).tolist() - param_ranges[name] = uniq_values - case _: - raise NotImplementedError(f"Unknown Parameter type: {type(hp)}\n{hp}") - values = product(*param_ranges.values()) - keys = list(space.keys()) + xs = torch.linspace(0, 1, steps=steps) + numeric_values = xs * (hp.max_value - hp.min_value) + hp.min_value + if isinstance(hp, Integer): + numeric_values = torch.round(numeric_values) + uniq_values = torch.unique(numeric_values).tolist() + param_ranges[name] = uniq_values + case _: + raise NotImplementedError(f"Parameter type: {type(hp)}\n{hp} not supported yet in GridSearch") + keys = list(param_ranges.keys()) + values = product(*param_ranges.values()) + config_dicts = [dict(zip(keys, p, strict=False)) for p in values] + keys_fid = list(fid_ranges.keys()) + values_fid = product(*fid_ranges.values()) + fid_dicts = [dict(zip(keys_fid, p, strict=False)) for p in values_fid] + configs = [] + random_config = neps_space.NepsCompatConverter.to_neps_config(neps_space.resolve( + pipeline=space, + domain_sampler=RandomSampler(predefined_samplings={}), + environment_values=fid_dicts[0], + )[1]) - return [dict(zip(keys, p, strict=False)) for p in values] + for config_dict in config_dicts: + for fid_dict in fid_dicts: + new_config = {} + for param in random_config.keys(): + for key in config_dict.keys(): + if key in param: + new_config[param] = config_dict[key] + for key in fid_dict.keys(): + if key in param: + new_config[param] = fid_dict[key] + configs.append(new_config) + return configs diff --git a/neps/runtime.py b/neps/runtime.py index a57c0a334..a2af213bb 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -48,6 +48,7 @@ WorkerSettings, 
evaluate_trial, ) +from neps.space.neps_spaces.neps_space import PipelineSpace, NepsCompatConverter from neps.status.status import _initiate_summary_csv, status from neps.utils.common import gc_disabled @@ -351,7 +352,13 @@ def _check_global_stopping_criterion( if self.settings.fidelities_to_spend is not None and hasattr( self.optimizer, "space" ): - fidelity_name = next(iter(self.optimizer.space.fidelities.keys())) + if not isinstance(self.optimizer.space, PipelineSpace): + fidelity_name = next(iter(self.optimizer.space.fidelities.keys())) + else: + fidelity_name = next( + iter(self.optimizer.space.fidelity_attrs.keys()) + ) + fidelity_name = f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" count = sum( trial.config[fidelity_name] for _, trial in trials.items() diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 4b8172810..747cfe33d 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -199,7 +199,8 @@ def add_resolved(self, original: Any, resolved: Any) -> None: f"Original object has already been resolved: {original!r}. " + "\nIf you are doing resampling by name, " + "make sure you are not forgetting to request resampling also for" - " related objects." + "\nOtherwise it could lead to infinite recursion." + " related objects." + + "\nOtherwise it could lead to infinite recursion." ) if isinstance(original, Resampled): raise ValueError( @@ -248,7 +249,8 @@ def sample_from(self, domain_obj: Domain) -> Any: if self.was_already_resolved(domain_obj): raise ValueError( "We have already sampled a value for the given domain object:" - f" {domain_obj!r}." + "\nThis should not be happening." + f" {domain_obj!r}." + + "\nThis should not be happening." 
) # The range compatibility identifier is there to make sure when we say @@ -1241,6 +1243,38 @@ class NEPSSpace(PipelineSpace): return NEPSSpace() +ONLY_NEPS_ALGORITHMS_NAMES = [ + "neps_random_search", + "neps_priorband", + "complex_random_search", + "neps_hyperband", + "complex_hyperband", +] +CLASSIC_AND_NEPS_ALGORITHMS_NAMES = ["random_search", "priorband", "hyperband", "grid_search"] + + +# Lazy initialization to avoid circular imports +def _get_only_neps_algorithms_functions(): + """Get the list of NEPS-only algorithm functions lazily.""" + return [ + algorithms.neps_random_search, + algorithms.neps_priorband, + algorithms.complex_random_search, + algorithms.neps_hyperband, + algorithms.neps_grid_search, + ] + + +def _get_classic_and_neps_algorithms_functions(): + """Get the list of classic and NEPS algorithm functions lazily.""" + return [ + algorithms.random_search, + algorithms.priorband, + algorithms.hyperband, + algorithms.grid_search, + ] + + def check_neps_space_compatibility( optimizer_to_check: ( algorithms.OptimizerChoice @@ -1281,38 +1315,15 @@ def check_neps_space_compatibility( inner_optimizer = inner_optimizer.func only_neps_algorithm = ( - optimizer_to_check - in ( - algorithms.neps_random_search, - algorithms.neps_priorband, - algorithms.complex_random_search, - ) - or ( - inner_optimizer - and inner_optimizer - in ( - algorithms.neps_random_search, - algorithms.neps_priorband, - algorithms.complex_random_search, - ) - ) + optimizer_to_check in _get_only_neps_algorithms_functions() + or (inner_optimizer and inner_optimizer in _get_only_neps_algorithms_functions()) or ( - optimizer_to_check[0] - in ( - "neps_random_search", - "neps_priorband", - "complex_random_search", - ) + optimizer_to_check[0] in ONLY_NEPS_ALGORITHMS_NAMES if isinstance(optimizer_to_check, tuple) else False ) or ( - optimizer_to_check - in ( - "neps_random_search", - "neps_priorband", - "complex_random_search", - ) + optimizer_to_check in ONLY_NEPS_ALGORITHMS_NAMES if 
isinstance(optimizer_to_check, str) else False ) @@ -1320,35 +1331,19 @@ def check_neps_space_compatibility( if only_neps_algorithm: return "neps" neps_and_classic_algorithm = ( - optimizer_to_check - in ( - algorithms.random_search, - algorithms.priorband, - ) + optimizer_to_check in _get_classic_and_neps_algorithms_functions() or ( inner_optimizer - and inner_optimizer - in ( - algorithms.random_search, - algorithms.priorband, - ) + and inner_optimizer in _get_classic_and_neps_algorithms_functions() ) or optimizer_to_check == "auto" or ( - optimizer_to_check[0] - in ( - "random_search", - "priorband", - ) + optimizer_to_check[0] in CLASSIC_AND_NEPS_ALGORITHMS_NAMES if isinstance(optimizer_to_check, tuple) else False ) or ( - optimizer_to_check - in ( - "random_search", - "priorband", - ) + optimizer_to_check in CLASSIC_AND_NEPS_ALGORITHMS_NAMES if isinstance(optimizer_to_check, str) else False ) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 53322fb3a..72493af1e 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -89,9 +89,13 @@ def __str__(self) -> str: def __eq__(self, other: Fidelity | object) -> bool: if not isinstance(other, Fidelity): - raise ValueError("__eq__ only available to compare to Fidelity objects.") + return False return self._domain == other._domain + def __hash__(self) -> int: + """Get the hash of the fidelity based on its domain.""" + return hash(self._domain) + @property def min_value(self) -> int | float: """Get the minimum value of the fidelity domain. 
@@ -580,13 +584,26 @@ def __str__(self) -> str: def __eq__(self, other: Categorical | object) -> bool: if not isinstance(other, Categorical): - raise ValueError("__eq__ only available to compare to Categorical objects.") + return False return ( self.prior == other.prior and self.prior_confidence == other.prior_confidence and self.choices == other.choices ) + def __hash__(self) -> int: + """Get the hash of the categorical domain based on its attributes.""" + try: + choices_hash = hash(self.choices) + except TypeError: + # If choices are not hashable (e.g., contain mutable objects), use id + choices_hash = id(self.choices) + return hash(( + self._prior if self._prior is not _UNSET else None, + self._prior_confidence if self._prior_confidence is not _UNSET else None, + choices_hash, + )) + @property def min_value(self) -> int: """Get the minimum value of the categorical domain. @@ -775,7 +792,7 @@ def __str__(self) -> str: def __eq__(self, other: Float | object) -> bool: if not isinstance(other, Float): - raise ValueError("__eq__ only available to compare to Float objects.") + return False return ( self._prior == other._prior and self._prior_confidence == other._prior_confidence @@ -784,6 +801,16 @@ def __eq__(self, other: Float | object) -> bool: and self._log == other._log ) + def __hash__(self) -> int: + """Get the hash of the float domain based on its attributes.""" + return hash(( + self._prior if self._prior is not _UNSET else None, + self._prior_confidence if self._prior_confidence is not _UNSET else None, + self.min_value, + self.max_value, + self._log, + )) + @property def min_value(self) -> float: """Get the minimum value of the floating-point domain. 
@@ -970,7 +997,9 @@ def __str__(self) -> str: def __eq__(self, other: Integer | object) -> bool: if not isinstance(other, Integer): - raise ValueError("__eq__ only available to compare to Integer objects.") + print(other) + print(self) + return False return ( self._prior == other._prior and self._prior_confidence == other._prior_confidence @@ -979,6 +1008,16 @@ def __eq__(self, other: Integer | object) -> bool: and self._log == other._log ) + def __hash__(self) -> int: + """Get the hash of the integer domain based on its attributes.""" + return hash(( + self._prior if self._prior is not _UNSET else None, + self._prior_confidence if self._prior_confidence is not _UNSET else None, + self.min_value, + self.max_value, + self._log, + )) + @property def min_value(self) -> int: """Get the minimum value of the integer domain. @@ -1157,7 +1196,7 @@ def __str__(self) -> str: def __eq__(self, other: Operation | object) -> bool: if not isinstance(other, Operation): - raise ValueError("__eq__ only available to compare to Operation objects.") + return False return ( self.operator == other.operator and self.args == other.args diff --git a/neps_examples/basic_usage/algo_tests.ipynb b/neps_examples/basic_usage/algo_tests.ipynb new file mode 100644 index 000000000..93643fdd7 --- /dev/null +++ b/neps_examples/basic_usage/algo_tests.ipynb @@ -0,0 +1,219 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "938adc12", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "c:\\Users\\Amega\\Git\\neps\\.venv\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled, Integer, Fidelity\n", + "import neps\n", + "\n", + "# Define the NEPS space for the neural network architecture\n", + "class SimpleSpace(PipelineSpace):\n", + " int_param1 = Fidelity(Integer(1,100))\n", + " int_param2 = Integer(1,100, prior=50, prior_confidence=\"medium\")\n", + " # int_param3 = Integer(1,100)#, prior=50, prior_confidence=\"high\")\n", + " cat = Categorical(['option1', 'option2', 'option3'])#, prior=0, prior_confidence='low')\n", + "global_values = []\n", + "def evaluate_pipeline(int_param1, int_param2, *args, **kwargs):\n", + " # Dummy evaluation function\n", + " global_values.append(int_param1)\n", + " return - int_param2/50 +int_param1" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "89427fd0", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Grid search does not support priors, they will be ignored.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Configs: 20\n", + "\n", + " success: 20\n", + "\n", + "# Best Found (config 18):\n", + "\n", + " objective_to_minimize: -0.5\n", + " config: {'SAMPLING__Resolvable.int_param2::integer__1_100_False': 75.0, 'SAMPLING__Resolvable.cat::categorical__3': 0, 'ENVIRONMENT__int_param1': 1}\n", + " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\neps_test_runs\\algo_tests\\configs\\config_18\n", + "\n" + ] + } + ], + "source": [ + "from neps.optimizers.utils.grid import make_grid\n", + "from pprint import pprint\n", + "from functools import partial\n", + "\n", + "# pprint(make_grid(SimpleSpace(), size_per_numerical_hp=2, ignore_fidelity=False))\n", + "\n", + "neps.run(\n", + " evaluate_pipeline,\n", + " SimpleSpace(),\n", + " 
root_directory=\"neps_test_runs/algo_tests\",\n", + " overwrite_root_directory=True,\n", + " optimizer=partial(neps.algorithms.neps_grid_search, ignore_fidelity=True, size_per_numerical_dimension=5),\n", + " evaluations_to_spend=20\n", + ")\n", + "neps.status(\"neps_test_runs/algo_tests\",print_summary=True)\n", + "print()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6197a65", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAASdRJREFUeJzt3Qu81NP+//FP95BKd+lCkUKRorZyLZKOWx3noNOppIhCITouCck9pItrudQJh0KIFArd6aakklPRhZJUqp3m/3iv3/87Z2b2zG5fZvZ8Z+b1fBh7z8x3z3znO9Osz/ezPmutYoFAIGAAAAA+UjzZOwAAABCJAAUAAPgOAQoAAPAdAhQAAOA7BCgAAMB3CFAAAIDvEKAAAADfIUABAAC+U9JS0P79++2nn36yQw891IoVK5bs3QEAAHmguWF///13q1mzphUvXjz9AhQFJ7Vr1072bgAAgAJYt26d1apVK/0CFGVOvBdYvnz5ZO8OAADIg+3bt7sEg9eOp12A4nXrKDghQAEAILXkpTyDIlkAAOA7BCgAAMB3CFAAAIDvpGQNSl6HMu3bt8/+/PPPZO8KgCQrUaKElSxZkmkJgBSSlgHK3r17bcOGDbZr165k7woAnzj44IPt8MMPt9KlSyd7VwBkYoCiSdzWrFnjzpg0EYy+jDhrAjKXsqk6afn555/dd8MxxxxzwAmiACRf2gUo+iJSkKJx1jpjAoCDDjrISpUqZf/973/dd0TZsmWTvUsADiBtTyM4QwIQiu8EILXwLxYAAPgOAQqiUt3OpEmTct2mW7dudskll5gf9u/bb7+1li1butT9SSedlJR9SgU//PCDO3YLFy5M9q4AQGbVoORm2NTviuy5+p3bIF/bq7Hftm1bjqDg008/tbPPPtt+/fVXq1ixYvC6Rw1yvXr17MYbb7RevXrFbf81Cuqwww4LNmpHHXWUff31175p/EP3TwYNGmSHHHKIrVixwsqVK5fUfQMAFF5GBSjpRA2x1iH6448/7N1337XevXtb/fr1rU2bNnF5/Bo1apifRe7f6tWrrUOHDla3bt0CP6aKJxmCemAcJwBFgS6eFFWtWjXXSCuzccMNN7ifX331VcxhllWrVrX//Oc/wduUCdGcEJ7PP//cypQpE5w7JrQLRY8tTZs2dbefddZZYY//6KOPuseqXLmyXX/99ZadnZ2vbqGbbrop7DH1u17TgAEDrFKlSu513nPPPWF/E7p/+n3BggV27733ut+9bZcsWWLnnHOOG8GhfVOGaceOHTn2ZciQIW5I+rHHHhvsAnn99dft9NNPd397yimn2HfffWfz5s2z5s2buwxN+/bt3bDVWJTx6ty5szvuegwNbR0zZkzw/ttuu80aNGjgRpopA3bXXXeFHTe9Br1HL774otWpU8c953XXXecmHnz44YfdMdFnQPseeVxGjRrl9k/Pq8cOfd+jWbp0qdtez1G9enXr0qWL/fLLL2HvR58+fdz7VKVKFWvXrl2ujwcA8UCAkuIUfEyZMs
XWrl1rLVq0iLqNGq0zzjjDdQ95jefy5ctd9kW1G/LZZ5+5hjja0Oy5c+e6nx9//LHrWnnrrbeC933yyScue6GfL730ko0dO9ZdCkuPpS6bOXPmuAZZwcfUqVOjbqt9Ov744+3mm292v99yyy22c+dO15CqG0iBxRtvvOH2Xw1tqGnTprlslB578uTJYV1Gd955pwv6NAPplVde6QKmJ5980mbOnGmrVq2yu+++O+b+K+BYtmyZffDBB+5YK2hQ4+7RUuM6TtpGj/ncc8/ZsGHDwh5Dx1V/r/f33//+t73wwgsuS7R+/Xr3fj300ENuH3WMIp+7U6dOtmjRIhckXX755W4folG3ooI4BZ/z5893z7Vp0yb729/+luP9UNbkiy++sNGjR8d83QAQL3Tx+IgayMj6iVhT9deqVcv93LNnj5v3RQ24gpBYdBb8zDPPuN9nzJjhGiSdhStoadiwoft55plnRv1bZQFEWYjIrhUFAE8//bSbGE+PowZUjX7Pnj2tMJo0aeKCBFH2Qc+hxz333HNzbKt9UhChY+ftnxr83bt328svv+wCHdFjXHjhha5hV6ZAdN/zzz8f7LJQBkUU5HiZAtX3XHHFFe75W7Vq5W7r0aNHroGYAkYdY2Vc5Mgjjwy7X4GFR/fp+SZMmOCCII/eV2VQFMwcd9xxrvZIwdT777/vhswq46PXouAwNDi97LLL7Oqrr3a/33fffS74Gj58uI0cOTLHfuqYaD8feOCB4G16Ts0jpKyRsjzee6BAMb+yN23OcVup6tXy/TgAMg8Bio+oAdKZdiidHf/jH//Isa3O4tVwKUBRhkOZAXWHqBYlGgUfamjVLaGzbwUsXoCixvbLL78MaxzzSpkLBScedfWoa6WwFKCE0uNu3pyzsYtFGYMTTzwxGJyIggs1+mrkvQClcePGUespQp8/dNvQ23LbH70PymIoA3Peeee5rqTTTjsteP9rr71mTz31lMuSqNtJ60appiiUAhe9x6HPqWMdOp9HtP3IysrKcT3WqB1lWRTgRCss1r55AUqzZs1ivlYASAQCFB9RY3r00UeH3aZ0fjSqC9GoHi9IUCCjeoRYAYoaVwUwCk500bYKUHQGri4Q1T+ENqB5pdk5I7uTFATEosZV3VKhotWs5PdxCyo0gIn1/N5SCZG35bY/qunQrKXKdiiDoeJl1eeoXmfWrFmu62Xw4MEuS1OhQgWXPXnsscdi7oP3nPE+LgqOvKxSpNAapVjHCQAShRqUNKEza9WUxKKGTEWfb7/9tn3zzTfWunVrlyVQBkZdP+qKiNUIeRmGeKwMre4i1YmESsScHI0aNXLZAdWieFQ/4XWNFAW91q5du9qrr75qTzzxhD377LPudmWrNNrojjvucMdd3ScKZuJl9uzZOa7reERz8sknu8+DsjUKjkMvBCUAkokAJUUprb9x40bXsKkA9JVXXrGLL744179Rt46KLTU6RCl9NdaqWxk3blzM+hPRaBGNCPEKKH/77bcC77cKMlWMqdqQlStXujoTjSKJN2UoNEeMAgQ9vrox+vbt60aoeF02iaQCWgWDKqZVAKD6Ii9IUECiGhVlTdSNoq6eiRMnxu259XlQHYlqSHR8vS7AaJTV2bp1q6uxUSZN+/Phhx9a9+7d4xKQAkBBEaCkKGUBlILXma6GrF5zzTWuEDI3CkLU6EQO6Y28LZIKUNWIKtOi4bgHCoRyoy4NjTJRvYtGDf3+++/2z3/+0+JNo5HU0Krx1fP89a9/dd0sKgotCso6DRw40GWpFAQqw6WARC666CLr16+fCxoULCqjomMSL+o60nPpuRUIKihVkW00ej+VWdJnQLUy6grUcGJ1H7J2DYBkKhaILAhIAdu3b3f99jqTjyws1MgNLamuGg1WLEWmUVeesjHJWoIgHqN4Iv8mXqN++G4A/N1+R+IUCQAA+A4BCgAA8B2GGQNpJAV7bAEgKjIoAADAdwhQAACA7xCgAAAA3yFAAQAAvk
OAAgAAfIcABQAA+A4BClJSt27dDjhb6qeffupmVt22bZsle/80/LdXr15uRWntUyIWSEwXbS+91G6+685k7waAJMuseVA+GVp0z3X2wHw3aC+99JINHTrUbr/99uDtkyZNsksvvTRf81toXZ3PPvvM/V6mTBmrV6+eW/fluuuuc7dp3ZVHHnnExo4d6xYb1EKAWsCuZ8+edvXVV8ds7M8++2z79ddf3TotobQSrtZv0cW77q3Oq/VctDhf+/bt7dFHH7XDDjvM4uHJJ58MOyZ6zVrXRqsG+0Hk/mmhRR1vHUe9H1WqVEnq/gFAWmVQRo0a5RYg0/z5umRlZdkHH3wQttaFVketXLmyWy23U6dObvXbUFrFtUOHDm4xN62Se+utt9q+ffvi94pSmNYHeeihh1wQUFgKNjZs2GDLli2zv/3tb+590aJx3mJyw4YNs/vuu8/dr5V+dXYfz0zDvffe655f77dWS54xY4bdcMMNcXt8reUQGSj5SeT+aZVgLe542mmnWY0aNdwCjPmlgId/KwemAHz//v3J3g0ARRmg1KpVyx588EFbsGCBzZ8/38455xy3sq2Wkxet0Pruu++65d51Bv/TTz9Zx44dw744FJzs3bvXreCqjIHOKrU0Pczatm3rGi9lUXLz+eef2+mnn+4yH7Vr13YN/86dO8O2UQCox9LZ+j333OMyJO+88467Tz+VTbnsssvcwmknnnii9ejRw2655Za4vZZDDz3UPf8RRxzhMi9du3a1r776Kub2eu6//OUvwevKhKgrRJkHj1Zufv7553N0oeh3fd6UtdDf6PLDDz8E/06f1+bNm7tjogBhxYoV+eoWUndM6GPqM6vgQ6slN2rUyAXj559/vgvIPJH717dvXxes6XGUYZI9e/a4906BuoLT1q1b27x583Lsi04CmjVr5rJheu+VLdLjKWOljJQyVM8995z7DHTv3t0dex2r0JOHaEaOHOk+F3puPYZWfPbouGt/9Dp1wqH3RkGWR8dC+/b6668HP4taNfq7775zr0HHW8flwiuusJ9/+SX4dz1uuMEuat/e7r51gFWtXMXKH3qoXXvtte47IRYdJ30+9Fk65JBDrEWLFu7YeLz3Q59rrdqs46RjDSCDApQLL7zQLrjgAvel1qBBAxsyZIj7Epo9e7ZbmfCFF16wxx9/3AUu+kIdM2aMC0R0v3z00UfujP3VV1916Xil/XUWP2LEiFy/oDJFiRIl7IEHHrDhw4fb+vXro26jRkKNobJTixcvttdee801WurCyY0aEO8YK3CYPn26/fzzz1YUfvzxRxe4qmGJ5cwzz3SvQ0GsKOBQN4jXEOkx9NrVOEdSYKJsnpc10kWBm+eOO+6wxx57zAXVylxcddVVhX5Nu3btcl1Wr7zyissOqUGMFeBp/5RRUoCvffOCkAEDBtibb77pAnUFbwoq2rVrZ1u3bg37e3X56cRg+fLlLoMp+hsdn7lz57pgpXfv3i7gVACmxzrvvPOsS5cubj+j0bFQcKT9UsCmgOSMM84I3q9gp3///m67adOmua46dTVGZiYGDRpkd955p3tOHdsrr7zSvS695pkzZ9rqH9bY4IcfDvubT2bOtG9XfmdT33rLXhk12t566y2X1YtFn+1Zs2bZhAkT3Gder1P/BlauXBn2fij7qABWJ0wK+gBkaJGsGhJ9YeiLTI2DzlKzs7NdFsDTsGFDq1OnjvtyEf1s3LixO1vz6AtZyy97WZhYZ1DaJvSSrtQIKHjTF380yq507tzZnT0rUFSD9NRTT9nLL7/sutiivU8KCPXFrsBRFEQqOFGgogZPZ7AHOtv2qJFVUBp6iXa2etttt7n7FBjpb3S2reeNRWfhv//+u3399deuK0ON/s033xwMUPRTZ9BqxKN1p5QuXTqYNdJFwZ5HgbQCIJ1dq7FX0BztWOWHPuujR492mYKTTz7ZNaJqyKPR/imroX3SvlWtWtX9u1GXqWqBFKhr35QF0fFSoB
9KQcS5555r9evXd0W2oqyXAgN9BgYOHOiyIApYFKTpNmUlt2zZ4t73aPSeKRuhzEjdunWtadOmYV1wCoCV/dTx1ufxxRdftCVLlrgTjFAKyvRvWJmkG2+80X0P3HXXXdaqVSv3mN2uvNI+/fKLsL/Re/XcsCfs+IYN7YJzz3WvT5/haN0ya9evdyc6ysrqM6JjoOdUdke3h74fygjp38Oxxx7rPgsAMixA0ZeUGh6lUdWwTZw40X25bty40X3xRNYFKBjRfaKfocGJd793XyxqlPUl711Cz47Tkc4EdYasM+ZIixYtcint0ABBDYS+3NesWRPcTl/WXoCgRkvdbzrLFr1fS5cudZktZRM2b97ssmOxCmRD6axYXR6hl5o1a+bYTrVFuk8NpNdwq3vPy5BE0udGja4CEX3G9FlSXYwClh07driMioKMgvCyDqI6ENFrLgw1gGosQx83P4+pbJAaVTXknlKlStmpp56a431XEJTba1Lgo24YBf+R/65i7ZMCHgUm6gJUpkV1QqHZFmUnrrjiCne/6s28bqnIYDR0P7znDNuPKlXDunjc3xx3XFgAoRMcvcfr1q3LsZ9Lly93nxllbEM/8/o8hHY56fMSui8AUl++K/V0dqKGR106//nPf1xtgTdiJFF0hqh0s0cZlHQOUpRqV9Ch1636hVD6Ir/mmmuiFpwqW+VRlkVdGwpQ1HgqRR9K11UzoIuyMcqyqKHS36guJRbdFxmERiv41Nm8l+3QGb1qStQQqSA3NMsWSt03ClAU/CoYUbZAZ+bq+tFnTBmVglDD71EmR2IVUXrHKXQEjgKJ3B7Te9xErSSsTEdenj8/r1MZHXXL6Hir61UZF9UqqftJ768CVgUwyuooANXjnHDCCTm6YqM9Z+RthSlY3bFzpwvAlJkJzYqJAhWPPufe82dvyhmUlapOlw+Q9gGKzlS8hkd1JvpCU3/z3//+d/flpeLC0AZMo3iU1hb9VJ95KG+Uj7dNNGqwdMkkqjlQal0BYSh1JyjNHq2rI5QyTQfaJpSyKhJZbBsvXuPyxx9/xNxGQYm6EhTwqMbAC1o0+kjFl9HqT0I/l7GyM/mh7hdRrYg3JDoRc5Yo+6J9/uKLL1wg4AVC+vfkDddONB1nBYu6qEtR/25Vm6T3QXUpCk7UrSIKEuNl8bJl7nOgoEKUyVOwEe2k46TGjd37qkyQty8AMkOhJ2rT2ZFqRBSs6MwptB9eX3JKCevMWfRT6fvQtPPUqVNdCtlrIGHBNLmyIOqbj6ztUA2Fah7UcCoV//bbbx+wSDaURmtomPGcOXPcfCU6i9YwZKXRVTcUD6onUbedGnoFperyUeOvGoHcMkf6u8mTJweDEf1U94OyQNq/WNQFodej0SW//PJLgc/aFdSpoVQ2Qcf2vffecwW2iciKqMtNx0UFqgo61RWnbhaNqEo0HWN9tvQZ0mdANUw6ZgqIFZipy+jZZ5+1VatWuaAlNINZWDqR6dW/ny1bscI++PhjFxzp8xuZ5ZMG9eu7fwf//Oc/XTGtujH1eVK3r94bAOkrXwGKuhxUvKhGQIGGrqtx0xeIztj1xaovMqXxlZLVkEcFJS1btnR/r5EFCkTUlaBaCg3TVKGfGsdMy5DkhYoHIxta9bOru0MZBZ1RqhBR6flodSCxqPtIo2qUxlejr246BSZK9Rdkfo5otE8KKrRfKsRUg6zHV8MXixpGBWYKZLxASUGLjsGB6k9UOKksjT5f+vuCDjNVkK2MzbfffuuOteqB7r//fktUlkzFqPr3oMyYggH9m4jXZHa5UbZEDb4Kp9WNpoJfve7jjz/eBQoqgNe/YXXrqH5Jxbzxcvbpp9vRR9WzNpdeYp2v6WUXXXSRCwhjUTGsAhR18SmA0vBtZZpCuzQBpJ9igXx0nCsAUYZEZ8UKSPQFrjN6FdyJRkboS0RfdMqqqCFUsWZo943O1nTmqMBGjZYaR31R56dhVA2Knl91MMq+hNI+6CxLtRIa2Q
AgeSLrQTQPyrbtv9mbY1+KWR8S+Tf5rR+JVYPCdwOQfLm135HydbocOfwxkv7Ra04TXWJRf/v777+fn6cFAAAZJrPW4gHgO9EyHgBAgAKgyLwQUfQNALEQoADwtcLWpADI0GHGAAAA8UaAAgAAfIcABQAA+A4BCgAA8B0CFAAA4DsEKIhKK8NOmjQp12200rKmHS8qWm9HqyJnCs22rPdBC3Ai91E+oRcA6SGjhhmPXDiyyJ7rupOuy9f2auzVEEUGBWqkzj77bPv111/d+ine9dDZe+vVq2c33nij9erVK277H7qar9Ze0vTgX3/9tVthOVm0/oqWRwAApL+MClDSiVaK1joGWrZeC/9pfaP69etbmzZt4vL4oesnJZtWvy1durRbBDAej5NJMvE1A0gPdPGkqGrVqrkgQpmNG264wf386quvom6r9SDVuP/nP/8J3qZMiFYb9nz++eduReldu3bl6OLRY4tWTtbtZ511VtjjP/roo+6xtFKxVqbOzs6Oud9atVbP/cwzz1jt2rXt4IMPtr/97W9u4ajIrqMhQ4a41ZC1gm20Lh6tWHzxxRdbuXLlXLCmx9m0aVOO53r++edzXSBOC1hqZWdljJSh0Yq+3npRf/75p1skU39/0EEHuX158sknw/7e298HHnjAqlev7jJdWol63759duutt1qlSpWsVq1ablVej7JSOpZaNfi0005z+6aVg7VSdW70PmkVa+2Ljp/e+507dwbv1zG677773Oq/OibxzKoBQFEiQElxCj6mTJniGusWLVpE3UYN4RlnnOG6h0TdRcuXL3fZl2+//dbdpobxlFNOcQFDpLlz57qfH3/8sev6eeutt4L3ffLJJ7Z69Wr386WXXrKxY8e6S25WrVplr7/+usv8aN/VdXTddeFdYlo1W1miqVOn2uTJk3M8xv79+11wsnXrVrfv2u7777+3v//97zme680333T7vHDhwqj7o6BKq2/PmDHDlixZYg899JALerznUXDxxhtv2LJly+zuu++2f/3rX27/Q02fPt1++ukn9xiPP/64DRo0yP7yl7+4oGfOnDl27bXX2jXXXGPr168P+zsFMFoBXMcgKyvLBUpbtmyJup86zueff7516tTJFi9ebK+99poLWPr06ZMjYDzxxBPdY9511125vhcA4Fd08fiIGmKvYfToDD4aNZqihlWNqM7YFYTEoqyHshaiRlTZEGVgFLQ0bNjQ/TzzzDOj/q3XtaIMSWTXjxrgp59+2kqUKOEep0OHDi646NmzZ8x90bL3L7/8sh1xxBHu+vDhw93fPfbYY8HHVyZDmY9Y3RN6DgUTa9ascZkE0WMq+6FaFQVbXheHbs+te0jBnRr9xo0bu+uq6fGUKlXKBg8eHLyuTMqsWbNcgKKMjUdZkqeeesqKFy/usiwPP/ywy0YpmJGBAwfagw8+6AKKyy+/PPh3Ci703DJq1CgXsGnV8AEDBuTYz6FDh1rnzp3tpptuctePOeYY95x63/S3XobonHPOcUEPAKQyMig+ouJXneWHXtRIRzNz5sywbdS9oEYqFjViygD8/PPPLuOggEUXBSbqkvnyyy9zdN3khQICBScedfVs3pz7SIo6deoEgxNR5kBBljImHgULudVOKAOkwMQLTuS4445z3Su6z1O3bt0D1q6om+T++++3Vq1aucyHshOhRowYYc2aNXOPowDy2WefdUFN5HFQcOJRV48X8IiOkQK8yGOj1+4pWbKkNW/ePGz/Qy1atMhlp7QP3qVdu3bu2ClQ8+gxACDVEaD4iLIGRx99dNgltCEPpTN53a+GsXv37talSxdXsxGLGkud5Ss4CQ1Q9LsyDgpSVAuRX8owRHYnqcEsrHiN1snL41x99dWue0jHUFkZNfDK6ohqRG655RZXh/LRRx+5gFDHW5mZAx2HeB+bHTt2uG6i0ABWQcvKlStdgXR+XjMA+B0BSprQGbpqSmJR46jiyrffftu++eYba926tTVp0s
R1EanrR41yrIbNy2TE6m7KL2UfVK/hmT17drBrJK8aNWpk69atcxePMkQaqq1MSn4pE6M6EdWqqHvkueeec7d/8cUXLnBTjYy6xRQUqhYkXvTaPSqqXbBggXtt0Zx88snuNUYGsbowUgdAuiFASVHqKti4caMbgaICzldeecUVjeZGGZN///vfbmSLugcUFKhuZdy4cTHrT7wRQxo1ovoIjZIJHXFTEKqV6Nq1qzv7V1eVulhUz5Gfoc1t27Z1WSHVZGj0kgp5NXJFryO/XRyq6fjwww9dN4keSwW/XpCgOo/58+e7+7/77jtXdKqMU7yo+2jixImuWFnFuipgvuqqq6Jue9ttt7muONWtKHuizIkCzsgiWQBIBwQoKUrZBtV76OxZDZdS/163RCxqvJUFCa010e+Rt0VSbYSKMZVp0bDfAwVCB6J97tixo11wwQV23nnnuUzOyJH5m0RPGSE1zirSVZClgEXFrRrZkl96/QoOFJRolEyDBg2C+6Pjqn3V6CCNktIIm8gRR4WhwlldNOpGBbTvvPOOValSJeq2Ok7qklOgpGyYMjoaVaT3BADSTbGAxqmmmO3bt1uFChXcmbzmeogcIaIz4dzmvUDyaG4Sza8Sa8hvpvDL7LyFFTm1fKnq1XK9Px4K+hy7s7Ptvz/+aJWWLLaaV18d9/0CULj2OxIZFAAA4DsEKAAAwHcIUFDkXTyZ3r3jTUmv3tVU7t4BgEQiQAEAAL5DgAIAAHyHAAUAAPgOAQoAAPAdAhQAAOA7JZO9AwBSZ+I1ACgqZFCQkrp162aXXHJJrtt8+umnbkp8LSBYVEOoM23Y8DHNm9tTzz6T7N0AkIYyKoPy8/Cni+y5qvbtk+8G96WXXrKhQ4fa7bffHrxd08Jfeumlbs6MvNK6OlqzRcqUKePWqNGCct4aMlp75pFHHrGxY8e6xQa1EKAWxevZs6ddHWMKcDX2Z599tlvMrmLFijnm9NCCe7p41/W4ogUJq1evbu3bt7dHH33UrZ0TD08++WTYMdFrVnDwxBNPWLLccsst1rdv36Q9PwCkEzIoPqK1gx566CEXBBSWgo0NGzbYsmXL3ErBWgxPKxnL4MGDbdiwYXbfffe5+7V6b69eveKaabj33nvd869du9atljxjxgy3anG8aC2HyEApWRQo7du3z60QXbly5UI9VnZ2tvm9Cyj0Ulh79+6Ny34BSD8EKD6iFXlr1Kjhsii50aq3Ws1WmY/atWu7hn/nzp1h2xx88MHusZQ9UdeDMiRaKVf0U9mUyy67zC1Yp5V0e/To4TIA8XLooYe65z/iiCNc5qVr16721Vdfxdxez/2Xv/wleF2ZEHXPTJkyJWwV5Oeffz5HF49+V8ZIWRX9jS5ajM+zYMECa968uTsmp512mq1YsSLmfujv9PcTJkxw2ypoPOGEE4IZqdCuow8++MCaNWvmslR6TyK7ePbv3+8CtVq1arltdF/o6/GeSyswa6VpPZeCuWgBkB67Tp067nG0enFosPfKK6+41+cd8yuvvNI2b96cY38//PBDtwKyPjfnnHOO20avQas4a9Eu/d2uXbuCf9f20ktd5k0XBYRaZXnQQw/mms1TkKssXNWqVd1jntepoy365pvg/fc+8og1b3OOvTjuVWtwSnM7tG6dmI8FILMRoPhIiRIl7IEHHrDhw4fb+vXro26zevVqO//8861Tp062ePFi17ipcVQjkhs1St7Zqhqx6dOn288//2xF4ccff7R3333XWrRoEXMbNdB6Hep+EgUEahDVuHqPodeurpxICkyysrKCWSNdFLh57rjjDnvsscds/vz5VrJkSbvqqqsOuM+33nqr3XzzzW61YT32hRdeaFu2bAnbRl1xDz74oC1fvtyaNGkSdb/0vOra0nvVrl07u+iii2zlypU5HufGG290j6NtIr355psu4/XMM8+4v1W3X+PGjcOyLsqGLVq0yN2nwEdBWyQFOU8//bR9+eWXtm7dOpdZUy
A4fvx4e++99+yjjz6yES+8EPY36nbUMZs7d657PU+OHu2Ci1j+evHFtnHdOnv31XE2+8OPrGnjJnb+ZX+1rSFZwdVr1tjEye/Zay+OsXnTplmyuntDLwD8hwDFZ1RvojPtQYMGRb1f2ZXOnTu7eg9lRXSW/9RTT9nLL79su3fvzrG9GvxXX33VNZA6a5bHH3/cBScKVNSwXnvtte5MOi+UDVBXRuhF3TiRbrvtNnefAiP9jc7g9byxKCP0+++/u4BAZ+jqElKA4AUo+qlsjLIokXR2X7p06WDWSBcFe54hQ4a4AOi4445zwYAa6GjHKpQCPgWByi6MGjXKPccLEY23siPnnnuu1a9f3ypVqpTjMRSY6Dhcfvnlduyxx7ruu2h1MnovO3bs6LJZhx9+eI7H0fHVa1KGTVmUU0891QVjHgVcqvFRtqxly5bu86D3c8eOHWGPc//991urVq1cFkUZMwWBem26ruP/17/+1T778ouwv1Ggp+BI+6/P3XU9etiTzzwb9Zh9MWeOzfv6a5vw3PPW7KST7Jh69eyhe+6xiuXL21uT3w1utzc7214cPtyaNm5sTY47Ptf3AUDmIkDxITVkOnPVGXUknSWruDU0QNBZt7oT1qxZE9xu5MiRwQBBjVm/fv2sd+/e7j411EuXLrXZs2e7xk2pfmUIYhXIhpo5c6Zb7C/0oi6HaBkI3afAaNr/P0vu0KFDMEMSSfUk6mpSILJkyRIXcKguRgGLGlo1pgoyCiI0u+EFAKFdINEoa+JRBkFdKJHvh26LZfv27fbTTz+5gCCUrufncURdcX/88YcLQPReTpw40dW8hHZh6f1T8KJuHu84RQaOocdBhcsK6PSYobdt/uWXsL9RwKPgMni9eXNbteb7qO/j4m++sR07d1qNRg3tsHpHBS9r1q611T/8X9G01K1Vy6pWqZLrawaAjBrFkyrOOOMMF3QMHDgwR6pejfU111wTteBUDZRHZ7vq2lCAokZZo2lC6fopp5ziLjqDV5alS5cu7m90Jh+L7ossTlUDHkndM162Q5keZQ3U6KsgV5mAaNR9owBFdRZqZJWVUAZDXT8KUJRRKYhSpUoFf/caWwV0hXXIIYcU+jHy8jjKYqhu5uOPP7apU6e6+iGNwtIxUbedPiu6qH5FtR8KTHQ9sgA18jiEXvduK8xxUXByePXqNvWtiTnuUxbFo8AIAA6EAMWnVNug7gCl1kOdfPLJbuRNtK6OUOqSONA2oZRVkchi23jxulyUCYhFQcmLL77oAh7V2XhBi0Yffffdd1HrTzzKuMTKzhSEsksKFEXZCmUpDlTnE0oFososffHFF2GZH11XF01+KdBUlkQXjchq2LChyzSpO0y1Mfq8eHU3qrWJlzlz5oRfX7DAjj6qXlgXmqdpkya2cfNmK1mihB0ZEiwDQEEQoPiUiiCVBVE9QSjVNCjtrsZSXTI6+1bAojNrFUDmhWoN1NWg+hXVNqhrSNmaBg0auIYvHlRPsnHjRteAqiBzwIAB7uxezxmLAgL93eTJk12DKwpKtL/KAmn/YtHcK2pMVSCqrq1oNSH5MWLECJf5UQZHNRga+p2X4trIbi7VEqlGRcHmmDFjXLdXtJE6uVGXnoIvFRkr+6BslwKWunXruoyHgjMVVquWSF13KpiNF2Vj+vfv77J2GoU18oUX7OF7Bkfdts0ZZ7guoL9272ZD77rb1aBs2LTJ3v94ql3S/gJXlwIAeUUNio+pCDMy5a46AqX2lVFQYaMKHO++++6odSCxKP2vUTU6G1ejryHACkw0iiNad01BaJ8UVGi/NHxYgZQeP7d5QjSJmwIzBTJeoKSgRcfgQPUnGqass3plgrxujsJQgKSL6mLUxaSh2eq2yg91w6lxV9eUXpeGGOtxFPjkh7rUnnvuORdU6v1XV4/ePx1LvVYFMG+88YZ77dpnFefGyz//+U+X9VLWR5mbPprMr0uXqNuqi+idcePt9JYtredNN9rxrU6zf1
x7ja1dv96qVa0at30CkBmKBfIzRalPqABRXRi//fabS6WH0ugMZQRUK6F5JYD8UAZGnx0V52batPUSOvma5kFpeuopYaOO4jE5W2FFrg+U133anZ1t//3xR6u0ZLGV+mN3oWZ+BhD/9jsSGRQAAOA71KAAyHiRk7WRUQGSjwAFiCi2TcFez4T4eOLEHN0pAFBU8tXFo1lMNW+GJoOqVq2aWwslcl0Tjbrw1kPxLhpdEEoFjJq0SyMS9Dga7RA68RQAAMhs+cqgaPSIKvkVpCig+Ne//mXnnXeeG+YaOtmUZrvUCJRoEzNpuKSCEw1v1ZTjWjdFIwU0aZTWoQEAAMhXgBK6EqtoeKMyIJrEypvUSrw1UaLRUFMFNBoqqam1NVJC8zZofg8tZqY5HeKBND2AUAHTd0LA/QfA/wo1ikfDhCRyUixNRKU5I7RMvSYAC13CfdasWW5OCAUnofNyaOjRNyHLsofas2ePuz/0Eos3fXfocwLA7r3ZWuPASkQsAQAgzYpkNXmW1nDR5FEKRDxXXnmlm+FSE3RpoThlRlSn8tZbb7n7NbtoaHAi3nXdF6v2ZfDg6LNXRtJkXZrYylsMTtmc0MXOAMSWnZ0ddv3PiFWfI+9PhvzukzInCk5+3rLFym7YaMXjsA4TAB8HKKpF0bTammUzlFag9ShTotlE27RpY6tXr3ZTfheEsjCakdOjDIq37kg0XvfSgVasBRDuz+2/h10v8fv2XO9PhvzvU8BlThSclP/xxwI9J8OQgRQJULQOjNZLmTFjhtWqVSvXbbV+iKxatcoFKAoe5s6dG7bNpk2b3M9YdSta3VaXvFLGRIGR6mP8cMYH+MHWV1/NcVulf/wj120OdH8y5HufAua6dcicAGkcoKjwtG/fvjZx4kT79NNP3ZTgB6LF0UQBg2RlZdmQIUNcdkMBhGihO015662oGy/q7om26iqQiSKnd5fI5SAitznQ/cngx30CkOQARd0648ePt7ffftvNheLVjGhefa2uqm4c3X/BBRe4hcxUg9KvXz83wkeLnImGJSsQ6dKliz388MPuMe6880732PnJkgBAokR26QDweYAyatSo4GRsobSMfLdu3dwQYQ0f1uJiO3fudHUinTp1cgGIRxkNdQ/17t3bZVM0f4pW0w2dNwWAP9BQA0iZLp7cKCDRZG4HolE+77//fn6eGgAAZBBWMwYAAL7DYoEAkICuMIYiA4VDBgUAAPgOAQoAAPAdAhQAAOA7BCgAAMB3CFAAAIDvEKAAAADfIUABAAC+Q4ACAAB8hwAFAAD4DgEKAADwHQIUAADgOwQoAADAd1gsEADisDgggPgigwIAAHyHDAqQpmf1Vfv2Sdq+AEBhkUEBAAC+Q4ACAAB8hwAFAAD4DgEKAADwHQIUAADgOwQoAADAdwhQAACA7xCgAAAA3yFAAQAAvkOAAgAAfIcABQAA+A5r8QBpihV3AaQyMigAAMB3CFAAAIDvEKAAAADfoQYFSFHUmABIZ2RQAACA75BBAVIEGRMAmYQMCgAA8B0CFAAA4DsEKAAAwHcIUAAAgO8QoAAAAN8hQAEAAL5DgAIAAHyHAAUAAPgOAQoAAEjtAGXo0KF2yimn2KGHHmrVqlWzSy65xFasWBG2ze7du+3666+3ypUrW7ly5axTp062adOmsG3Wrl1rHTp0sIMPPtg9zq233mr79u2LzysCAACZFaB89tlnLviYPXu2TZ061bKzs+28886znTt3Brfp16+fvfvuu/bGG2+47X/66Sfr2LFj8P4///zTBSd79+61L7/80l566SUbO3as3X333fF9ZQAAIDPW4pkyZUrYdQUWyoAsWLDAzjjjDPvtt9/shRdesPHjx9s555zjthkzZow1atTIBTUtW7a0jz76yJYtW2Yff/yxVa9e3U466SS777777LbbbrN77rnHSpcuHd9XCAAAMqsGRQGJVKpUyf1UoKKsStu2bYPbNGzY0OrUqWOzZs1y1/Wzce
PGLjjxtGvXzrZv327ffPNN1OfZs2ePuz/0AgAA0leBA5T9+/fbTTfdZK1atbITTjjB3bZx40aXAalYsWLYtgpGdJ+3TWhw4t3v3Rer9qVChQrBS+3atQu62wAAIJ0DFNWiLF261CZMmGCJNnDgQJet8S7r1q1L+HMCAIAUqUHx9OnTxyZPnmwzZsywWrVqBW+vUaOGK37dtm1bWBZFo3h0n7fN3Llzwx7PG+XjbROpTJky7gIA6eLn4U+HXa/at0/S9gVI+QxKIBBwwcnEiRNt+vTpdtRRR4Xd36xZMytVqpRNmzYteJuGIWtYcVZWlruun0uWLLHNmzcHt9GIoPLly9txxx1X+FcEAAAyK4Oibh2N0Hn77bfdXChezYjqQg466CD3s0ePHta/f39XOKugo2/fvi4o0Qge0bBkBSJdunSxhx9+2D3GnXfe6R6bLAkAAMh3gDJq1Cj386yzzgq7XUOJu3Xr5n4fNmyYFS9e3E3QptE3GqEzcuTI4LYlSpRw3UO9e/d2gcshhxxiXbt2tXvvvZd3BMilCwAAMknJ/HbxHEjZsmVtxIgR7hJL3bp17f3338/PUwMAgAzCWjwAAMB3CFAAAIDvEKAAAADfIUABAADpMVEbACB3TMQGFA4ZFAAA4DsEKAAAwHcIUAAAgO8QoAAAAN8hQAEAAL5DgAIAAHyHAAUAAPgOAQoAAPAdAhQAAOA7BCgAAMB3CFAAAIDvEKAAAADfYbFAAAU2b+O8sOun1DglafsCIL2QQQEAAL5DgAIAAHyHLh4AyGdXltCdBSQWGRQAAOA7BCgAAMB36OIBYCMXjnQ/jwzpyqALo2j9PPzpsOtV+/ZJ2r4AfkCAAqQ56icApCK6eAAAgO+QQQGQUEzmBqAgyKAAAADfIYMC+LRIEgAyGRkUAADgOwQoAADAd+jiAZB0FNICiEQGBQAA+A4ZFCAJKIgteJblh/8/6+11J10X18dN9QyONxuwJ17HB0gWMigAAMB3yKAAQBpk4bSO0g+XtUja/gDxRoACoEhF61IpCBY4BNIbAQqQItKlViKRGA0EpA8CFAAoAIIhILEIUACkbTdQOtWYVO3bJ2n7AiQDo3gAAIDvkEEBkCdkNZKLuXOQaQhQAGQUAi0gNRCgAIiKhhxAStWgzJgxwy688EKrWbOmFStWzCZNmhR2f7du3dztoZfzzz8/bJutW7da586drXz58laxYkXr0aOH7dixo/CvBgAAZGYGZefOnXbiiSfaVVddZR07doy6jQKSMWPGBK+XKVMm7H4FJxs2bLCpU6dadna2de/e3Xr16mXjx48vyGsA0hIZDACZLN8BSvv27d0lNwpIatSoEfW+5cuX25QpU2zevHnWvHlzd9vw4cPtggsusEcffdRlZgAgmQgOgTQdZvzpp59atWrV7Nhjj7XevXvbli1bgvfNmjXLdet4wYm0bdvWihcvbnPmzIn6eHv27LHt27eHXQAAQPqKe4Ci7p2XX37Zpk2bZg899JB99tlnLuPy559/uvs3btzogpdQJUuWtEqVKrn7ohk6dKhVqFAheKldu3a8dxsAAKTzKJ7LL788+Hvjxo2tSZMmVr9+fZdVadOmTYEec+DAgda/f//gdWVQCFKAwndh/PD/F9yL9+P6hd/2B4CPhhnXq1fPqlSpYqtWrXIBimpTNm/eHLbNvn373MieWHUrqmmJLLQFUhnruABAkqe6X79+vatBOfzww931rKws27Ztmy1YsCC4zfTp023//v3WokWLRO8OAABIxwyK5itRNsSzZs0aW7hwoash0WXw4MHWqVMnlw1ZvXq1DRgwwI4++mhr166d275Ro0auTqVnz542evRoN8y4T58+rmuIETwAUlW07iQyY0ARZlDmz59vTZs2dRdRbYh+v/vuu61EiRK2ePFiu+iii6xBgwZuArZmzZrZzJkzw7poxo0bZw0bNnRdPhpe3Lp1a3v22WcL8TIAAEBGZ1DOOussCwQCMe//8M
MPD/gYyrQwKRvSwcgoRabXnXRdjttY6A0A8oe1eIA0wqiV1EdXEVBERbIAAAD5RQYFSGFkTACkKwIUwAcINAAgHF08AADAdwhQAACA7xCgAAAA3yFAAQAAvkOAAgAAfIdRPEARzDZ7JKN0ACBfCFCABAid2p7gBPEehs7MssgEdPEAAADfIYMCFHJxQKAgnx8vs0Y2BIiODAoAAPAdAhQAAOA7dPEAQIaK1mV53UnXJWVfgEgEKECcMawYhVooctt/c25UsW6R7Q/gF3TxAAAA3yFAAQAAvkMXDwCkercQkIYIUAAgDVEAi1RHFw8AAPAdMihADMwaCwDJQ4CCjET6G5mIoBuphAAFABJk9vdbgr+vmvqdlama1N0BUgoBCtIO2REASH0EKEABHPnGnLDrP1zWImn7AiA9DZv6Xdj1fuc2sExCgAIgfSV52vgNe5YGf/9q+2uWVbVykT03kOoIUADYrNX/VytR8tddwdtqHXZwvh9nfcjfF+ZxgLTyydCct509MBl7klKYBwUAAPgOGZRUjL6JvH2TaSA7kCCs6OubInMKzJEsBCjIiNE3eZr/ITIQPKxCXJ4bAJB/BCgovDTpXx25bXH4DYednqxdQZxs370vx23lLfkZOCELF0dkmdMSAQqQwiKLUmnsik6mHPu0mVeIICblEKAAQJqIx/w8oVkeyarP0GgkBwEKkMLK79kQcUv9JO0JfFlovGZv+G1H+azbMgWzGpk+eVpRIkBBRoo8S5QsBt0D6VsXh5RDgOKzCDnyud3z8y75JnUeOpFZ2HDY0DPVyLPUgg6Zjfw7htmmp2ifDz/Vu6yZmfO2IqxBIWORGYMUoqHpSxOp+I84cp+LcqXXWtsX5Lyx4kFF9vyZUmBZJA3ntg0JD+KizZALILEIUJDykhnopAKmn8/d9o2rc9xWvga1PECy0esOAAB8hwwK/C2JVf7RJtUq0ixHMUvB+gnSV8h7t/Os7yOGNJ9dsOfK8Tj1Mnto9LAU7PKPhgAlgdLlQ5IOwwcjR+3UStqeoKCFosjU0TedkrAj8AMCFBQ+8OJTlID5TMysLAc2IQiGiiSwaLk2/KRg5MJ6ObZpeoBMiFOn8LuH1JTvb8AZM2bYI488YgsWLLANGzbYxIkT7ZJLLgneHwgEbNCgQfbcc8/Ztm3brFWrVjZq1Cg75phjgtts3brV+vbta++++64VL17cOnXqZE8++aSVK1fOfMlvw7jycpYRp32ONuwZqdb4pme3y7ptf7iflULW20nmOjvRuulCA89a27NtfflmlhZScJ6ReHUnpW2G/ROftXMFCVB27txpJ554ol111VXWsWPHHPc//PDD9tRTT9lLL71kRx11lN11113Wrl07W7ZsmZUtW9Zt07lzZxfcTJ061bKzs6179+7Wq1cvGz9+fHxeFTLqy6Dl2mfDrn+948coWx2XmExHUWY5ivjM33dDoaPNOQN/LrTpsiNHJGVfkD7y/e3avn17d4lG2ZMnnnjC7rzzTrv44ovdbS+//LJVr17dJk2aZJdffrktX77cpkyZYvPmzbPmzZu7bYYPH24XXHCBPfroo1azZk3LJGQoDjAhVMUmliyhc6WU3/NL4rt0CrAyLxD3f3NxCuYzRdTJNZOdDUkTcT39W7NmjW3cuNHatm0bvK1ChQrWokULmzVrlgtQ9LNixYrB4ES0vbp65syZY5deemmOx92zZ4+7eLZv3x7P3U5L0fpy85LFiPzHFpmdkNl1ehVu59KIFzR43Q1SO4n7AyA15PxufbRod+CToZkVoCg4EWVMQum6d59+VqtWLXwnSpa0SpUqBbeJNHToUBs8eHA8dzWlpWQhWQGr8/3Wb4yikVu2KDQYzBRFuShkjlmW8zDDcrT3JLIANpEZi5YJqrlIlwz3sBRdQiUFdtFs4MCB1r9//7AMSu3anKdmorh9OWU4r74k2qKJSFFRapSO/Dg8sPmhZxHuD/IlXYIh3wYoNWrUcD83bdpkhx
9+ePB2XT/ppJOC22zevDns7/bt2+dG9nh/H6lMmTLuko4ZgmhdKDkkaNIhv/2DKMqzMCRG9Noapo0H/GZWCkxuF9ep7jVqR0HGtGnTwrIdqi3Jyspy1/VTw481TNkzffp0279/v6tVAQAAyHcGZceOHbZq1aqwwtiFCxe6GpI6derYTTfdZPfff7+b98QbZqyROd5cKY0aNbLzzz/fevbsaaNHj3bDjPv06eMKaDNtBA/yL1rG6Z3i//s8Sm0rulWJU6J2w+fJx1TNDG0v878scTyl7WitqCOGjii6Or0U1DLZhbSpFqDMnz/fzj77f5WKXm1I165dbezYsTZgwAA3V4rmNVGmpHXr1m5YsTcHiowbN84FJW3atAlO1Ka5U1JKASqg/VY/kewROpHP/w5LVyaNVxgZOpw6RwPMDKwpJS+BTqXJ68KuL2pdJYF7hEJ392eYfAcoZ511lpvvJJZixYrZvffe6y6xKNuSbpOy5WVkTUE/gPE6G8gxhDguj5o+MnF0SLqJnFyuwDPLEozl/u+iomWsvH2PJybTMSxFR+MUVBq/NPhJuqRckSHdET4/htGn1c+M94JMQ+YgQCnopGc+rHgG8ltTofVhAGBWASf3TCQClAw6q+DMA0icgixfkE5r7yRKOnxvJTKDPCsVJ+7MIwIU+FrkCJ2L9h9dsD7zZC5ziyKr78ikIKEgjnxjTtj1olw0JBXmOfLb3FDxCupSdXkSApQCRuzUVPgjYPEjzc5aMqRGoFaxpO5OgRvtpK7cnEDpUDdT0Pc0bkXESfy3Gu1x83LigtSTHt84AIAiyzAx4g1FgQAFAIA0rX9JZUyNBQAAfIcMChJS0d80QVNYA35BQW5qi1ZH2NKe9fWkmC0zLKNDgJJAmfZhQvoWZSIznPj5/5Y6KIqp7/MySi8VCuORGAQoyKi1ZgAAqYEABQmp6F9XgPlL/HwmiYKLHNqa7GHXfgmUQxdmRHohex4fBCgoklWI/TR3QVGnsQEgmVqmaMBEgAIg6bU01OkAiESAAiCh0nVGWgCJxTcFAEQgo5P+a3bB/5ioDQAA+A4ZFCABIzUYoREb2QkAeUEGBQAA+A4ZFACALzBrLEIRoCBp4+X5MgIAxEIXDwAA8B0CFAAA4DsEKAAAwHcIUAAAgO8QoAAAAN9hFA8yXuTqxgCA5CODAgAAfIcMCsKw6BaA/GQbF7WukpR9QfojgwIAAHyHDAoAIG5ZFTIqiBcyKAAAwHfIoGQQ6ktSy/bd+5K9CwCQNAQoSDuknAEg9dHFAwAAfIcMCgAg7dHFnXoIUAAAIIjxHQIUAIBlejAC/yFAyWD8AwUA+BUBCtIeiwECKEp0FcUHo3gAAIDvkEFJU3TfAABSGQEKACBumCgR8UKAkiYyOWNCjQmAZMnk796Uq0G55557rFixYmGXhg0bBu/fvXu3XX/99Va5cmUrV66cderUyTZt2hTv3QAAACksIRmU448/3j7++OP/PUnJ/z1Nv3797L333rM33njDKlSoYH369LGOHTvaF198kYhdAQAgbsiYpHiAooCkRo0aOW7/7bff7IUXXrDx48fbOeec424bM2aMNWrUyGbPnm0tW7ZMxO4AADIIQUR6SEiAsnLlSqtZs6aVLVvWsrKybOjQoVanTh1bsGCBZWdnW9u2bYPbqvtH982aNStmgLJnzx538Wzfvj0Ru408/kNnTD8AIOUClBYtWtjYsWPt2GOPtQ0bNtjgwYPt9NNPt6VLl9rGjRutdOnSVrFixbC/qV69ursvFgU4ehwAANIRJ4NFEKC0b98++HuTJk1cwFK3bl17/fXX7aCDDirQYw4cOND69+8flkGpXbt2XPYXqYdROwCQ/hI+k6yyJQ0aNLBVq1a5upS9e/fatm3bwrbRKJ5oNSueMmXKWPny5cMuAAAgfSU8QNmxY4etXr3aDj/8cGvWrJmVKlXKpk2bFrx/xYoVtnbtWlerAgAAkJAunltuucUuvPBC163z00
8/2aBBg6xEiRJ2xRVXuGHFPXr0cN01lSpVcpmQvn37uuCEETz/h35IAAASEKCsX7/eBSNbtmyxqlWrWuvWrd0QYv0uw4YNs+LFi7sJ2jQyp127djZy5Mh47wYAAEhhcQ9QJkyYkOv9Gno8YsQIdwEApDfW5olfRv2iDMumJ7wGBQAAIL9YLBD5xiyNAIBEI4MCAAB8hwxKEWKEDgCkDrLFyUUGBQAA+A4BCgAA8B26eFIQaUcASB107xcMGRQAAOA7ZFBSABkTAEgvfK8fGAEKUm4mSgBA+qOLBwAA+A4BCgAA8B26eJKMfkgAQEG9k8YjhAhQAAC+Wd2Y1Y8L7500WQWZLh4AAOA7ZFASiO4bAAAKhgAlw1NoyUY6FwAQDQFKnJAtAYD8Y56jxHsnRQtpqUEBAAC+QwYFAIAU8E6GZeoJUKKgvgQAgOSiiwcAAPgOGRQkFKN0AMS7iJbvkczoKSCDAgAAfIcABQAA+A4BCgAA8B1qUFCkmJQJAJAXBCgAgJRC8X3RzLGSZclFgJIHmTY5DgCkEgKW9EQNCgAA8B0yKIhrDQlnLgCAeCCDAgAAfIcMCuKKUToA/IYaldREBgUAAPgOAQoAAPAdAhQAAOA71KAAADIaKyb7EwEKACCjUMyfGujiAQAAvkMGJYMx9A4ACvb9yPdn4pFBAQAAvkMGJY0Q0QNAYlC3UvQIUBBEgAMA8AsCFAAACokTvDSrQRkxYoQdeeSRVrZsWWvRooXNnTs3mbsDAAAyPYPy2muvWf/+/W306NEuOHniiSesXbt2tmLFCqtWrVqydiut5bcPlT5XAEDGBSiPP/649ezZ07p37+6uK1B577337MUXX7Tbb7/d0l1+h7ABANK7C4huIh8EKHv37rUFCxbYwIEDg7cVL17c2rZta7Nmzcqx/Z49e9zF89tvv7mf27dvT8j+7fkjO+z6CbO2hF1fmlU51/vzss2uiO2Pmb4x1/vj8Tp27d1nfhe5z6m636mwz6m636m4z6m636m4z37Z7wPtQ+R3vtvmAI9R1BLRxnqPGQgEDrxxIAl+/PFH7Vngyy+/DLv91ltvDZx66qk5th80aJDbngsXLly4cOFiKX9Zt27dAWOFlBjFo0yL6lU8+/fvt61bt1rlypWtWLFihYrkateubevWrbPy5cvHaW8RDce66HCsiw7HuuhwrNPjeCtz8vvvv1vNmjUPuG1SApQqVapYiRIlbNOmTWG363qNGjVybF+mTBl3CVWxYsW47Y8OPh/4osGxLjoc66LDsS46HOvUP94VKlTw7zDj0qVLW7NmzWzatGlhWRFdz8rKSsYuAQAAH0laF4+6bLp27WrNmze3U0891Q0z3rlzZ3BUDwAAyFxJC1D+/ve/288//2x33323bdy40U466SSbMmWKVa9evcj2Qd1GgwYNytF9hPjjWBcdjnXR4VgXHY515h3vYqqUTdqzAwAA+G2qewAAgGgIUAAAgO8QoAAAAN8hQAEAAL6T0QHKiBEj7Mgjj7SyZcu6FZXnzp2b7F1KaUOHDrVTTjnFDj30ULci9SWXXOJWpw61e/duu/76690swOXKlbNOnTrlmLAP+ffggw+6WZVvuumm4G0c6/j68ccf7R//+Ic7ngcddJA1btzY5s+fH7xf4w00KvHwww9392ttsZUrVyZ1n1PRn3/+aXfddZcdddRR7jjWr1/f7rvvvrC1WzjWBTNjxgy78MIL3Syu+r6YNGlS2P15Oa6axb1z585u8jZNmNqjRw/bsWOHJUQgQ02YMCFQunTpwIsvvhj45ptvAj179gxUrFgxsGnTpmTvWspq165dYMyYMYGlS5cGFi5cGLjgggsCderUCezYsSO4zbXXXhuoXbt2YNq0aYH58+cHWrZsGTjttNOSut+pbu7cuYEjjzwy0KRJk8CNN94YvJ1jHT9bt24N1K1bN9CtW7fAnDlzAt9//33gww8/DKxatSq4zYMPPhioUKFCYNKkSYFFixYFLr
roosBRRx0V+OOPP5K676lmyJAhgcqVKwcmT54cWLNmTeCNN94IlCtXLvDkk08Gt+FYF8z7778fuOOOOwJvvfWWWw9n4sSJYffn5bief/75gRNPPDEwe/bswMyZMwNHH3104IorrggkQsYGKFqU8Prrrw9e//PPPwM1a9YMDB06NKn7lU42b97s/hF89tln7vq2bdsCpUqVcl84nuXLl7ttZs2alcQ9TV2///574JhjjglMnTo1cOaZZwYDFI51fN12222B1q1bx7x///79gRo1agQeeeSR4G16D8qUKRP497//XUR7mR46dOgQuOqqq8Ju69ixY6Bz587ud451fEQGKHk5rsuWLXN/N2/evOA2H3zwQaBYsWJuEeB4y8gunr1799qCBQtc+spTvHhxd33WrFlJ3bd08ttvv7mflSpVcj91zLOzs8OOe8OGDa1OnToc9wJSF06HDh3CjqlwrOPrnXfecbNeX3bZZa77smnTpvbcc88F71+zZo2bcDL0eGu9EXUdc7zz57TTTnPLnnz33Xfu+qJFi+zzzz+39u3bu+sc68TIy3HVT3Xr6N+CR9ur/ZwzZ07c9yklVjOOt19++cX1c0bOWqvr3377bdL2K51obSXVQ7Rq1cpOOOEEd5s+/FqHKXKhRx133Yf8mTBhgn311Vc2b968HPdxrOPr+++/t1GjRrklOv71r3+5Y37DDTe4Y6wlO7xjGu07heOdP7fffrtbSVcBtRaV1Xf1kCFDXN2DcKwTIy/HVT8VoIcqWbKkOwlNxLHPyAAFRXNmv3TpUnfmg/jTEug33nijTZ061RV5I/EBt84aH3jgAXddGRR9vkePHu0CFMTP66+/buPGjbPx48fb8ccfbwsXLnQnOyrs5Fhnlozs4qlSpYqLzCNHNOh6jRo1krZf6aJPnz42efJk++STT6xWrVrB23Vs1b22bdu2sO057vmnLpzNmzfbySef7M5gdPnss8/sqaeecr/rrIdjHT8a1XDccceF3daoUSNbu3at+907pnynFN6tt97qsiiXX365GynVpUsX69evnxslKBzrxMjLcdVPfe+E2rdvnxvZk4hjn5EBitKyzZo1c/2coWdIup6VlZXUfUtlqrtScDJx4kSbPn26GyYYSse8VKlSYcddw5D1Jc9xz582bdrYkiVL3Nmld9EZvtLg3u8c6/hRV2XkkHnVSNStW9f9rs+6vqBDj7e6KdQvz/HOn127drmahlA6odR3tHCsEyMvx1U/ddKjEySPvuv13qhWJe4CGTzMWNXJY8eOdZXJvXr1csOMN27cmOxdS1m9e/d2Q9Q+/fTTwIYNG4KXXbt2hQ191dDj6dOnu6GvWVlZ7oLCCx3FIxzr+A7lLlmypBsCu3LlysC4ceMCBx98cODVV18NG6Kp75C33347sHjx4sDFF1/M0NcC6Nq1a+CII44IDjPWkNgqVaoEBgwYENyGY13wUX9ff/21u6j5f/zxx93v//3vf/N8XDXMuGnTpm64/eeff+5GETLMOAGGDx/uvsA1H4qGHWtcNwpOH/hoF82N4tEH/brrrgscdthh7gv+0ksvdUEM4h+gcKzj69133w2ccMIJ7sSmYcOGgWeffTbsfg3TvOuuuwLVq1d327Rp0yawYsWKpO1vqtq+fbv7HOu7uWzZsoF69eq5uTv27NkT3IZjXTCffPJJ1O9oBYV5Pa5btmxxAYnmpilfvnyge/fuLvBJhGL6X/zzMgAAAAWXkTUoAADA3whQAACA7xCgAAAA3yFAAQAAvkOAAgAAfIcABQAA+A4BCgAA8B0CFAAA4DsEKAAAwHcIUAAAgO8QoAAAAN8hQAEAAOY3/w+ZdOxZGfyGLwAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "values=[]\n", + "for _ in range(10000):\n", + " values.append(algorithms.hyperband(SimpleSpace(),sampler=\"uniform\")({},None).config[\"int_param3\"])\n", + " values.append(algorithms.neps_hyperband(SimpleSpace(),sampler=\"uniform\")({},None).config['SAMPLING__Resolvable.int_param3::integer__1_100_False'])\n", + " values.append(algorithms.hyperband(SimpleSpace(),sampler=\"prior\")({},None).config[\"int_param3\"])\n", + " values.append(algorithms.neps_hyperband(SimpleSpace(),sampler=\"prior\")({},None).config['SAMPLING__Resolvable.int_param3::integer__1_100_False'])\n", + "\n", + "plt.hist([v for n,v in enumerate(values) if n % 4 == 0], alpha=0.5, label='HB with uniform sampler',bins=100)\n", + "plt.hist([v for n,v in enumerate(values) if n % 4 == 1], alpha=0.5, label='NePS HB with uniform sampler',bins=100)\n", + "plt.hist([v for n,v in enumerate(values) if n % 4 == 2], alpha=0.5, label='HB with prior sampler',bins=100)\n", + "plt.hist([v for n,v in enumerate(values) if n % 4 == 3], alpha=0.5, label='NePS HB with prior sampler',bins=100)\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "4d423fb2", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAAQalJREFUeJzt3QucjHX///EPLUtCDiEhKkUl5RCLuxOlklTuSrdKEhUqKspdSHJIhcghkkPppCK5SwmJrHOUyKHcSKETG3LK/B/v7+9/zT0zO3uetdfuvp6Px9idmWtnvnPNmO/7+h6ub4FAIBAwAAAAHymY0wUAAACIREABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+E2e50LFjx+ynn36y4sWLW4ECBXK6OAAAIB10btg///zTKlasaAULFsx7AUXhpHLlyjldDAAAkAnbt2+3SpUq5b2AopYT7wWWKFEip4sDAADSISkpyTUwePV4ngsoXreOwgkBBQCA3CU9wzMyPEj2iy++sJYtW7r+Iz3BjBkzUtz2vvvuc9sMHz487Pbff//d2rZt68LFySefbB06dLB9+/ZltCgAACCPynBA2b9/v9WuXdtGjRqV6nbTp0+3JUuWuCATSeHk22+/tTlz5tisWbNc6OnUqVNGiwIAAPKoDHfxXHPNNe6Smh07dtgDDzxgn3zyibVo0SLsvvXr19vs2bNt+fLlVq9ePXfbyJEj7dprr7Xnn38+aqABAAD5S1x2TAG+4447rEePHnbeeecluz8xMdF163jhRJo1a+amGy1dutRuvPHGZH9z6NAhdwkdZAPAX1MHjx49an///XdOFwVADjrhhBMsLi4uJqcAiXlAefbZZ13hHnzwwaj379y508qVKxdeiLg4K126tLsvmkGDBlm/fv1iXVQAMXD48GH7+eef7cCBAzldFAA+cOKJJ9qpp55qhQsX9k9AWblypb344ou2atWqmJ5ArVevXvbwww8nm6YEIGepxXTLli3uqEnds/pC4uSJQP5tST18+LD98ssv7nuhevXqaZ6M7bgFlIULF9ru3butSpUqwdvU5PvII4+4mTz//e9/rUKFCm6bUGoa1swe3RdNfHy8uwDwF30ZKaTogEFHTQDyt6JFi1qhQoVs69at7vuhSJEi/ggoGnui8SShmjdv7m5v3769u56QkGB79uxxrS1169Z1t82bN899yTVo0CCWxQFwnGTlKAlA3lIwRt8HGQ4oOl/J5s2bg9fVjLN69Wo3hkQtJ2XKlAnbXklKLSPnnHOOu16zZk27+uqrrWPHjjZ27Fg7cuSIde3a1dq0acMMHgAA4GQ45qxYscIuuugidxGNDdHvffr0SfdjTJ061WrUqGFNmzZ104ubNGli48aNy2hRAADHwWWXXWbdunVLdZtJkya5GZp+KJ8GbLdu3dqdDFRjotRqj+iqVq2a7GSqfhGXmQ+CBsKkl8adRFJryxtvvJHRpwaQiwybs/G4PVf3K8/O8N/cddddNnnyZDdL8PHHHw/errNj63QHGfmeizVV9qpwo1WsqnB1IswbbrgheN3jDVb+5z//6V5XrMbuvf/++641PLRSU/nSCi3HS2T59L5qTOTixYutbNmyVrJkyRwtHzKHjmMA+ZYG8OnUCH/88YflZhMnTnRTvdXlPnr0aHvttdfsmWeeidnj66AyPYu75ZTI8n3//fduOMH555/vhhhkZmaZJnhobCRSp4Gw2YWAAiDf0qB+VWBqbUjNokWL7B//+IeboaAZSzrPk5b9CG1R6N+/v912221WrFgxO+2008KWA1FrzFNPPeXG6alVQ60cKZ0rKjPUtaLXobJdd9111qpVK3e6h5SohUVj/zxqCVEl/t133wUrHb2Ozz77LFkXin7XDI3u3bu7v4ms/HUGcYWDk046yY03VHDKSLeQWrBCH1P77cILL3ShS/tZrSEas/jnn38Gt4ks3wsvvOCWUNHj6Lo
ohN55551WqlQpN+NMZ0TftGlTsrLMnDnTzj33XPc+bdu2zT2nwp7+Vq/p9NNPd9toKq32s2674IIL3PCHlKT1/uu16eSlCll6H//1r3+FzXb9/PPP3WvRvtWQCn0Or7jiCrfNxx9/7Pa3urP0d6HnI9Jr1/usi/abWpN69+6dauugWu3uueceO+WUU9xj6nnWrFmT7P145ZVXrFq1almapZMWAgqAfEtdIgMHDnTLbfz4449Rt9HRuCpajWn4+uuv7e2333aBJbSCl+eee86tU/bVV1+5LqOHHnrIrTcm7733ng0bNsxefvllVymqEq5Vq1a2vKaNGze6mZGpzYq89NJLXaXnWbBggau8vNu0FIkmMDRq1Chqd0qlSpXs6aefduEjNICoctSSJapwFRBUwT/66KNZfk16D7TPtHabLirv4MGDo26r8mkShmaMqmy67nXpKUQoXOiM5qqkNQZSrzO0/GpRU+Wr9eK8k4rqvWvcuLF7b7V8i2amKrDcfvvtLgieeeaZ7npKFX9a77/KoICrIKD7NDTirrvuSvY4CgcvvfSS67ravn273XLLLW78iIZM/Oc//7FPP/3UfZZDqbtLJ0NdtmyZO0/Z0KFD3etLyc033xwMPpptW6dOHTdeVKcC8WiijF6T9q0myWSXmJ9JNk+an/rRlfwyK8qbVPUfaf7dKQ+Ef8kBOL403kRHhH379rUJEyYku1+tK1rg1DtC18mnRowY4Sr5MWPGBI8gVYF5Y1nOPvts+/LLL12ldOWVV7qKWkfGarHRWAkdSV988cWplmvv3r3u6Dw91HKjsKVzSmlZELWi6ASXKdGRtQKUWgFUea1bt84dWSugaBV6/axfv37Uc9uoO0XP5R3th1JFq9mZqrBFIU5BJqvU1aIWDq8bRwFh7ty5NmDAgKjlU7l10kCvfAoFCiZ6T7zQpckaanFSIFCl7JVfXWQKmqEUZO699173uyaE6H3X/vH+7rHHHnOBaNeuXVHP55XW+3/33XcHfz/jjDPc56t+/fpu1mzoZ0AtOfqcSYcOHdx7rPCmv/FaxubPn+/K49Fr1OdQLTCaTfvNN9+46wpxkRS8FWQUULzxSwqc2kfvvvtucFFftbBNmTLFtbJkJ1pQAOR7OmrWkaYWM42ko1pVjqoovIvO7+SdRdejCiqUrnuPp4rsr7/+chWJKgYNclWYSI0qYx2dRl6iUYWj+1RWtTCoFUWVeEo0NkMVuVoiNJhU3QYKNbou+ul1jWSEgoEXTkSnO488MWdmqJsldIxJRh9X74OCWGirkk6JoQo79D1XqFF3TaTQ28qXL+9+hraAeLelVKa03n+1VLRs2dIFF71OhV8v2KRWDu1vL5x4t0WWoWHDhmFdZvpcKrBFWzdLnx+FIu2b0M+7PucKQh51c2V3OBFaUADke5dccokLHToijWxa1xe2jp6jjRkJPWt2anQUu2HDBjemQ90+nTt3dl1CCgKhs08iT3Z11llnpevxdXTubatKV+Mz1KqiI+5oj6EKS69ZLSU6UlYYUeWn1pe1a9e6LoTMdM1EvhY9T2rjHfQaI+8P7XJJ7XGzYwCrxnZEG1Ab+vze/dFuS6lMqb3/ao3QZ08Xteqo4lcwad68ebIBqJHPGev9os+6wl9o958ndKyQxicdDwQUADBzYxrU1eOdVNKjPnh1gaQVFpYsWZLsugYvhlZ+OkrWpUuXLu5cUGpu1+PHmrpgREftKdFR+vjx411AUVeJwoJCiypOBRWvKyEatTTEYuVqVcYKUxpw7FV62TGmQe+DWiyWLl0a7OL57bffXGjQgNjjIaX3XwFNZdHnz1tjLrUBtxml1xz5uVQ3pfcZCaXPohbtVWuTWq1yGl08APD/m+w11kT9/6HUn68WBY2nUOWp5vEPPvgg2SBZjW8YMmSI617RDJ5p06a5cR6iLiKNb1HrxA8//GCvv/66q7DUVB4LmnmhiuWnn35yR+Ua96FxMKEBKZJaTRS8NBhUJ8v
0btNRvGaUpHaUrMpLg2B37Nhhv/76a6bLrS4XdVP8+9//dl0IGuypfRVrqpA140bdKxpnoa4MDXDVbCvdnt1Se//VCqfAp8Gtuk9jZTRgNlbUGqMTqiqMvfnmm+55vM9lJI2RUReQzrGjAbcarKvP/hNPPBHT0JRetKCk4wRTDbf9lmybmQX/d7p/qb0n+X/SpPX/15+bmoXvZf38Cwlnhi8vEE3nCztn+XmAvE4Vu2bphFLXhyp9fUlrqrGOeDXO4tZbbw3bToui6ku8X79+bnqmZkuomd5rHtcRsioKtTwoDH344YfJlgbJLG+tMzXxq7tHLSGanaQj4ZSoDCqXgow3EFMBReVLa/yJ9pO6vbQf1NqS2ZPaaRyMKusePXq41hzNFtFMFW8wZqzPFaOKWWNt1HWiffTRRx+l2MUWS2m9/wowCmkKx2rF0MDU66+/PibPrdlFaknToFy1mmgfpLR/9fnRPtFnXZ8pDaL2Pk/eOJvjqUAgJ0+XmElJSUluTrdGueuLIPsDyri0A8qiKAEl/tQ0n2th0+QDsjKKgIKccvDgQTeALrvPh+B3fjuzKiAKmuq2PN6nsk/teyEj9TddPAAAwHcIKAAAwHcYgwIAWRRtUVQgp30eZbpwbkILCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgD4gE4zPmPGjFS30UrLWifFD+X77rvvrGHDhu5MoTpbKVKegq59lx2LIOZ1nAcFQPaYP+j4PdflvTL8J6rstcheZCjQuSMuv/xy++OPP9waKt51jyrkM844I9U1TTLj559/tlKlSgUrNZ0m/KuvvvJN5R9aPunbt69bUFCL0Hlr+QCxREABgHRQRay1Q7TwmhZ6u//++91ieVrgLha0KJufRZZPqw+3aNEiSysya9E+reSL1B3Op/uJLh4ASIdy5cq5SlotGw8++KD7uWrVqqjbag3WU045xd59993gbWoJOfXU/y0gumjRIouPj7cDBw4k60LRY8tFF13kbo9cXVir3eqxtBpuly5d7MiRIxnqFtKihqGPqd/1mnr27OlWGNbr1KrCoULLp99XrlzpVjXW796233zzjV1xxRVWtGhRVza1MO3bty9ZWQYMGGAVK1a0c845J9gF8s4777jVovW39evXt40bN9ry5cutXr16roXmmmuucavrpkQtXm3btnX7XY9RvXp1t4Kx57HHHnMrN5944omuBax3795h+02vQe/Rq6++alWqVHHP2blzZ7f68JAhQ9w+0WdAZY/cL2PGjHHl0/PqsUPf92jWrl3rttdzaJXgO+64w3799dew96Nr167ufSpbtmxwVez8hoACABmg8DF79mzbtm2bNWjQIOo2qrS0RL13qnFVnuvXr3etLxq7IQsWLHAVsSrMSMuWLXM/P/vsM9e18v777wfvmz9/vmu90M/JkyfbpEmT3CWr9Fjqslm6dKmrkBU+5syZE3Vblem8886zRx55xP3+6KOP2v79+11Fqm4gBYtp06a58quiDTV37lzXGqXHnjVrVliX0ZNPPulCX1xcnP3rX/9ygenFF1+0hQsX2ubNm61Pnz4pll+BY926dfbxxx+7fa3QoMrdU7x4cbeftI0ec/z48TZs2LCwx9B+1d/r/X3zzTdtwoQJrpXoxx9/dO/Xs88+68qofRT53K1bt7Y1a9a4kNSmTRtXhmjUragQp/C5YsUK91y7du2yW265Jdn7UbhwYfvyyy9t7Nixlh/RxQMg31IFGTl+QkfM0VSqVMn9PHTokB07dsxV4AohKdFR8Msvv+x+/+KLL1yFpKNwhZYaNWq4n5deemnUv1UrgKgVIrJrRQHgpZdeshNOOME9jipQVfodO3a0rLjgggtcSBC1Pug59LhXXnllsm1VJoUI7TuvfKrwDx48aFOmTHFBR/QYLVu2dBW7WgpE973yyivBLgtvHSOFHK+lQON7brvtNvf8jRs3drd16NAh1SCmwKh9rBY
XqVq1atj9ChYe3afne+utt1wI8uh9VQuKwsy5557rxh4pTH300UdWsGBB1+Kj16JwGBpOb775Zrvnnnvc7/3793fha+TIkTZ69Ohk5dQ+UTkHDhwYvE3PWblyZddqpFYe7z0YMmSI5WcEFAD5liogHWmH0tHx7bffnmxbHcWr4lJAUQuHWgbUHaKxKNEofKiiVbeEjr4VWLyAosp28eLFYZVjeqnlQuHEo64eda1klQJKKD3u7t270/33ajGoXbt2MJyIwoUqfVXyXkCpVatW1PEUoc8fum3obamVR++DWjHUAnPVVVe5rqRGjRoF73/77bdtxIgRrpVE3U5Hjx51Y4pCKbjoPQ59Tu1rhZPUypGQkJDsekqzdtTKooATbWCxyuYFlLp161p+R0ABkG+pMj3rrLPCblNzfjQaF6JZPV5IUJDReISUAooqVwUYhRNdtK0Cio7A1QWi8Q+hFWh6FSpUKFl3kkJASlS5qlsqVLQxKxl93MwKDTApPb+eO9ptqZVHYzq2bt3qWjvUgqHByxqfo/E6iYmJruulX79+rpWmZMmSrvXkhRdeSLEM3nPGer8oHHmtSpFCxygVS2E/5SeMQQGATNCRtcaUpEQVmQZ9fvDBB/btt99akyZNXCuBWmDU9aOuiJQqIa+FIaXupoxQd5HGiYTKjnNy1KxZ07UOaCyKR+MnvK6R40GvtV27dvb666/b8OHDbdy4ce52tVZpttETTzzh9ru6TxRmYmXJkiXJrmt/RFOnTh33eVBrjcJx6IVQEo6AAgDpoGb9nTt3uopNA0Bfe+01a9WqVap/o24dDbbU7BA16auy1riVqVOnpjj+RDRbRDNCvAGUe/fuzXS5NSBTgzE1NmTTpk1unIlmkcSaWih0jhgFBD2+ujEeeOABN0PF67LJThpAqzCowbQKABpf5IUEBRKNUVGribpR1NUzffr0mD23Pg8aR6IxJNq/XhdgNGrV+f33390YG7WkqTyffPKJtW/fPiaBNC8hoABAOqgVQE3wOtLVlNV7773XDYRMjUKIKp3IKb2Rt0XSAFRVompp0XTctIJQatSloVkmGu+iWUN//vmn3XnnnRZrmo2kilaVr57nn//8p+tm0aDQ40GtTr169XKtVAqBauFSIJHrr7/eunfv7kKDwqJaVLRPYkVdR3ouPbeCoEKpBtlGo/dTLUv6DGisjLoCNZ1Y3YehY11gViAQ2TmZCyQlJbk+RB1VRA5yioVhczaGXW+47f+aCUPNLLg57HrtRb8mL2f8//oTU7KwafjAtMxIOLNMmtt0vrBzlp8HiKRZG1u2bHHjM3T0DOQ36spTa0xOLUGQ274XMlJ/E9cAAIDvEFAAAIDvMM0YAIBMyoWjJHINWlAAAIDvEFAAAIDvEFAAAIDvEFAAAIDvEFAAAIDvEFAAAIDvEFAAAKnSafl1OvbUTJo0Kbja8/Fw11135buztxYoUMBmzJhh+UWGz4PyxRdf2HPPPWcrV650K2SGnuJXS3g/+eSTbrnrH374wZ3OtlmzZjZ48GC3/oBHazVoEakPP/zQrT3QunVre/HFF91iWgDyhtGrRx+358rMUg6q4CZPnmyDBg2yxx9/PHi7KoAbb7wxR89vocpegWDPnj1pnlpd1z1af0bftVoHR68rPj4+JuV5//33rVChQsHrWolX5UsrtGQn1RmcgyRvy3ALipbSrl27to0aNSrZfQcOHLBVq1a5RZj0Ux/qDRs2uIWaIle91GqTc+bMcStOKvR06tQpa68EADJI64Q8++yz9scff1huNnHiRHfAqPVPRo8e7VZafuaZZ2L2+KVLl7bixYubH2iRvWPHjrkD4Ky02CjcHD161PKTw4cPW54OKNdcc4374OsII5I+MAodt9xyi1v5s2HDhm4lS7W2aKlrWb9+vVtC/JVXXrEGDRpYkyZN3IqgWgnyp59+is2rAoB0UAtvhQoVXGtDahYtWmT/+Mc/rGjRola5cmV78MEH3cFaaIt
C//797bbbbrNixYrZaaedFnYQp8rwqaeesipVqrhWDbVy6DFiRRW1XofKdt1117nVj3WQmBK1sGhlX49aQtQS89133wUrMr2Ozz77LFkXj37funWrWx1YfxPagiNa0bhmzZquRfzqq692wSkln3/+ufv7//znP24lYAVG1Rtr165N1nU0c+ZMt0Kw9p/qk8gunkOHDrl9Wq5cOfc4qluWL1+e7Lk+/vhjq1u3rnscva+R9Nq1b7RytR7n9NNPD/t8DB061K1ArP2j/d25c2fbt29fsvLq4Fv1oFZ51v7WAbxa7PRZKVWqlCurwlZ6P0PRbN++3dW3ej6FSL3v//3vf4P3e/towIAB7jOn8uQm2T4GRSsW6kPhJd3ExET3e7169cK+JNTVs3Tp0qiPoQ+eVkAMvQBAVqlLZODAge4g6ccff4y6zffff+8qWnVFf/311/b222+7ii20ghd1fat1+auvvnJdRg899JA7YJP33nvPhg0bZi+//LJt2rTJdSOpkssOGzdutHnz5rkDwJRceumlrsL2LFiwwMqWLRu8TRW7uuwbNWqU7G/VMl6pUiV7+umnXfgIDSCqhJ9//nnXgqOWcQWJRx99NM0y9+jRw1544QX3vKeccoq1bNnSPX/o46qlSwe2an1XCInUs2dPt58VAhTOzjrrLGvevLkbUhBK742GHehgWaEo0ogRI1wYeuedd1wPwNSpU1148Kiu0jYqh55L+1rPHUrl1TY68NYBufarDuo1/EEX7R99Ft599910f4Yiaf/o9alla+HChfbll18GQ2FoS8ncuXPd6/B6LHKTuOxecvmxxx5zidBbVnnnzp3JPlxxcXEu/em+aJRe+/Xrl51FBZBPqeK48MILrW/fvjZhwoSo3z/qlvZaEKpXr+4qH1XyY8aMCS4n37hx4+BYlrPPPttVGAolV155pauo1cKhgzGN5VBLysUXX5zmwV16x+XpO1ZhS10WOqBTK0qvXr1S3F6tIKr8fvnlF/f9u27dOtc1r4r0vvvucz/r16/vjv4j6btaz6WKUa8pstIcO3asnXnmme66QpyCTFq077WfRJW+ApDG2ah1wHtcdV2p8o5GrVl6L9R6oVZ+GT9+vKuU9Z4qAHlUHu+5otF7pfdYLTA6uFYLSqjQcTcKLupR0D5T+UL3g8rj7Qe1oCiU7Nq1y72nagm6/PLLbf78+XbrrbcG/y61z1AkBWV1dSm0ea1Y6upTA4Dev6uuusrdptYYbVO4cGHLbbKtBUVvkD5catrUG5UV+o+m/6zeRc1aABArOjpXxaij6khr1qxxFZ8qFu+iI1dVDhrz4UlISAj7O133Hu/mm2+2v/76y8444wzr2LGjq3zTGv+gALB69epkl2hUiek+lVVHyWpFueOOO1J87PPPP98FDbWc6Oj7oosucqFG10U/FWIySoHGq5RF3SS7d+9O8+9C953Kpa6I0PdClWu01o7QVi7VOargPQqCCoGR72lo63006hbRvlQZ1A3z6aefht2vbq+mTZu6Lhi9R9rPv/32m2s1SWk/lC9f3oWZ0MCp2yL3TWqfoUh6rzdv3uzK4H0ute/UMKD94VFLXW4MJ9nWguKFE/VTqvnLaz0RJe7IN0X/UdUMF5nGPeorjNVodACIdMkll7jQoYMhVVChNL7g3nvvjTpmRC0h6aGxCmpmV+Wmo3qNW1BzvoJA6OyYUOpKUDdFeui709tWFeuff/7pWlV0dB/tMXTErdesI219tyqMKACo9UXjPxYvXpyurplIka9FzxOLmTYa+xM51iWz1KKQmjp16rjgqbEqer9Ul6nlS90xGt+hIHf//fe7cR0KBOru69Chg+tW8Vqcou2HaLcp5GbWvn373FgadUFFUjdZel9vvgooXjhRP6uar8qUKZMsEWrqnAbOaueKQozeqNT6TAEgO2lcgrp6IgcSqsJSF0haYWHJkiXJrmuwaGglq7EVunTp0sVq1Khh33zzjXv8WFMXjKjVJiX
qolI3iAKKKlsFIoUWBScFldDWiEg6Ig8d4JlV2lde2NOMKrUAhe67tKi1QmVSl4jXJaO6SGNaMjMVWgfV6nrRRd0zGtehg2jVW6qrNF5G+0s0ViVW0voMhdLnRt08GjIR2giQl8RlJrWpWcmjpKnmMCVJNefpzdQAJTUz6gPsjSvR/foAaWfrzVYzp/oq9SFSP2WbNm3CzpUCAMeTmsI11kTjS0JpHJ1mluh76p577nFHpAosagnRLEWPKschQ4a4WRO6b9q0aW52iqiLSN+HOgjTUfbrr7/uAkvk+IbM0kGfvmtVeergUOMsNIYhtUperSaaiaPvZY238G5Ty4nGn6R25K3uCg2C1fe2Ao4G2GaFyquDWXV7PPHEE+7xMnISNpVVrRoaa6K6RmFH74W6XdS6kRGapaO6TN1eCiF6H9VCpbEdCqmqszSoWkFT77nqsVhJ7TMUSZ9VhUnN3NH+07gd9VpoELMG7ep6bpfhMSgrVqxwb5wu8vDDD7vf+/TpYzt27HCjnzUaXkciepO9i5oMPWqS0tGD+vGuvfZa959j3LhxsX1lAJBB+qKPbHZX14e6YnRUr6nG3vdd5AHVI488Evx+VNeKKjp1G4kqN7VWqFVCj6euA52oMrKFObPat2/vvmdVKalr57zzznNdFBoAm1ogU7n0Xe2NjVBAUZBKa/yJ9pO6O9RyEdqdkJXWKw3aVau6gpb2TUbHTegxNNNKY0LUuqADaU151pTejNCYDoUEjVVRUNPr1MwbhRUN0tX7qjFLGsejuiytKeoZkdpnKJKCrkKiwthNN93kwqjCmMag5JUWlQKBXHgqPk0z1jlXNGA2O96IYXM2hl1vuC15eJpZ8H+tSFJ70a/Jyxl/aprPtbBpygO/0ivhzDLZcqZNIC36MlQrarVq1YKzWfIjP5xZNTfSGBjNZlG3zvE8Tb4f5aXP0MFUvhcyUn+zFg8AAPAdAgoAAMhfJ2oDgPwg9PTiSD+NdcmFowyyBZ+h5GhBAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAf0Oq2M2bMSHUbrbSckTVqYnF20+HDh1t+OrOt3getbYScx3lQAGSLX0b+byG97HbKA10z/Deq7FURRYaCyNOve9c9OnX3GWec4daO6dSpk8XKzz//HFw3RufE0GnCv/rqK7dWTk7RasCpLRoIZCcCCgCkw4YNG9zaIX/99ZdbzE6r52qxPC16GgtaMdcvDh8+7Bbry+pCgN7j5Cf58TVnF7p4ACAdypUr50KEWjYefPBB93PVqlVRt9XZUVW5v/vuu8HbvBXePYsWLbL4+Hg7cOBAsi4ePbZoVVvdHrm68PPPP+8eS6shd+nSxY4cOZJiuZ966in33C+//LJVrlzZrYJ7yy23uMXaIruOBgwY4FZpPuecc6J28Wzbts1atWrlVj9WWNPj7Nq1K9lzvfLKK6kuILl161Zr2bKlazFSC41WX9aKwaLVlLUqr/6+aNGiriwvvvhi2N975R04cKCVL1/etXRpheWjR49ajx49rHTp0m5l54kTJwb/Rq1S2pdvvfWWNWrUyJVNKxJrperU6H3SKtYqi/af3vv9+/cH79c+6t+/v915551un8SyVS2/I6AAQAYofMyePdtV1g0aNIi6jSrCSy65xHUPibqL1q9f71pfvvvuO3ebKsb69eu7wBBp2bJl7udnn33mun7ef//94H3z58+377//3v2cPHmyTZo0yV1Ss3nzZnvnnXdcy4/Krq6jzp3DVzifO3euayWaM2eOzZo1K9ljHDt2zIWT33//3ZVd2/3www926623Jnuu9957z5V59erVUcujUHXo0CH74osv7JtvvrFnn33WhR7veRQupk2bZuvWrbM+ffrYv//9b1f+UPPmzbOffvrJPcbQoUOtb9++dt1117nQs3TpUrvvvvvs3nvvtR9//DHs7xRgHnnkEbcPEhISXFD67bffopZT+/nqq6+21q1
b29dff21vv/22Cyxdu3ZNFhhr167tHrN3796pvhdIP7p4AORbqoi9itGjI/hoVGmKKlZVojpiVwhJiVo91GohqkTVGqIWGIWWGjVquJ+XXnpp1L/1ulbUQhLZ9aMK+KWXXrITTjjBPU6LFi1cuOjYsWOKZTl48KBNmTLFTjvtNHd95MiR7u9eeOGF4OOrJUMtHyl1T+g5FCa2bNniWhJEj6nWD41VUdjyujh0e2rdQwp3qvRr1arlrmtMj6dQoULWr1+/4HW1pCQmJrqAohYbj1pJRowYYQULFnStLEOGDHGtUQoz0qtXLxs8eLALFG3atAn+ncKFnlvGjBnjAtuECROsZ8+eyco5aNAga9u2rXXr1s1dr169untOvW/6W6+F6IorrnChB7FFCwqAfEuDX3WUH3pRJR3NwoULw7ZR94IqqZSoElMLwC+//OJaHBRYdFEwUZfM4sWLk3XdpIcCgcKJR109u3fvTvVvqlSpEgwnopYDhSy1mHgUFlIbO6EWIAUTL5zIueee67pXdJ/n9NNPT3PsirpJnnnmGWvcuLFr+VDrRKhRo0ZZ3bp13eMoQI4bN86Fmsj9oHDiUVePF3hE+0gBL3Lf6LV74uLirF69emHlD7VmzRrXOqUyeJfmzZu7faeg5tFjIPYIKADyLbUanHXWWWGX0Io8lI7kdb8qxvbt29sdd9zhxmykRJWljvIVTkIDin5Xi4NCisZCZJRaGCK7k1RhZlWsZuuk53Huuece1z2kfahWGVXwatURjRF59NFH3TiUTz/91AVC7W+1zKS1H2K9b/bt2+e6iUIDrELLpk2b3ADpjLxmZBwBBQAyQUfoGlOSElWOGlz5wQcf2LfffmtNmjSxCy64wHURqetHlXJKFZvXkpFSd1NGqfVB4zU8S5YsCXaNpFfNmjVt+/bt7uJRC5GmaqslJaPUEqNxIhqrou6R8ePHu9u//PJLF9w0RkbdYgqFGgsSK3rtHg2qXblypXtt0dSpU8e9xsgQqwszdbIfAQUA0kFdBTt37nQzUDSA87XXXnODRlOjFpM333zTzWxR94BCgcatTJ06NcXxJ96MIc0a0fgIzZIJnXGTGRor0a5dO3f0r64qdbFoPEdGpjY3a9bMtQppTIZmL2kgr2au6HVktItDYzo++eQT102ix9KAXy8kaJzHihUr3P0bN250g07V4hQr6j6aPn26G6yswboawHz33XdH3faxxx5zXXEat6LWE7WcKHBGDpJF9iCgAEA6qLVB4z109KyKS03/XrdESlR5qxUkdKyJfo+8LZLGRmgwplpaNO03rSCUFpX5pptusmuvvdauuuoq15IzevToDD2GWoRUOWuQrkKWAosGt2pmS0bp9SscKJRolszZZ58dLI/2q8qq2UGaJaUZNpEzjrJCA2d10awbDaCdOXOmlS1bNuq22k/qklNQUmuYWnQ0q0jvCbJfgYDmzOUySUlJVrJkSXdUoXnnsTZszsaw6w23jUu2zcyCm8Ou1170a/Jyxv/vnAcpWdj0AsuqhDPLpLlN5wtj9x8cCJ0doqPg1M55gZylc5Po/CopTfnNL/xydt78/r2QlIH6mxYUAADgOwQUAADgOwQUAMjjXTz5vXvHOyW9RjTQvZN7EFAAAIDvEFAAAIDvEFAAZFkunAwIwOffBwQUAJnmnVpci7QBQOj3QeTSAxnFasYAsnS6dy0W5y3IduKJJ7oTegHIny0nBw4ccN8H+l4IXdQyMwgoALLEO116WivqAsgfTj755Awto5ASAgqALFGLiU4Br/VjtEIvgPyrUKFCWW458RBQAMSEvpRi9cUEAAySBQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAuT+gfPHFF9ayZUurWLGiO4PkjBkzkp2Lv0+fPu7MkkWLFrVmzZrZpk2bwrb5/fffrW3btlaiRAl3StwOHTrYvn37sv5qAABA/gwo+/fvt9q1a9uoUaO
i3j9kyBAbMWKEjR071pYuXWrFihWz5s2b28GDB4PbKJx8++23NmfOHJs1a5YLPZ06dcraKwEAAHlGhk91f80117hLNGo9GT58uD355JPWqlUrd9uUKVOsfPnyrqWlTZs2tn79eps9e7YtX77c6tWr57YZOXKkXXvttfb888+7lhkAAJC/xXQMypYtW2znzp2uW8dTsmRJa9CggSUmJrrr+qluHS+ciLYvWLCga3GJ5tChQ5aUlBR2AQAAeVdMA4rCiajFJJSue/fpp1Y9DRUXF2elS5cObhNp0KBBLuh4l8qVK8ey2AAAwGdyxSyeXr162d69e4OX7du353SRAABAbgkoFSpUcD937doVdruue/fp5+7du8PuP3r0qJvZ420TKT4+3s34Cb0AAIC8K6YBpVq1ai5kzJ07N3ibxotobElCQoK7rp979uyxlStXBreZN2+eHTt2zI1VAQAAyPAsHp2vZPPmzWEDY1evXu3GkFSpUsW6detmzzzzjFWvXt0Flt69e7uZOTfccIPbvmbNmnb11Vdbx44d3VTkI0eOWNeuXd0MH2bwAACATAWUFStW2OWXXx68/vDDD7uf7dq1s0mTJlnPnj3duVJ0XhO1lDRp0sRNKy5SpEjwb6ZOnepCSdOmTd3sndatW7tzpwAAAGQqoFx22WXufCcp0dlln376aXdJiVpb3njjDd4BAACQe2fxAACA/IWAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAA8n5A+fvvv613795WrVo1K1q0qJ155pnWv39/CwQCwW30e58+fezUU0912zRr1sw2bdoU66IAAIBcKuYB5dlnn7UxY8bYSy+9ZOvXr3fXhwwZYiNHjgxuo+sjRoywsWPH2tKlS61YsWLWvHlzO3jwYKyLAwAAcqG4WD/g4sWLrVWrVtaiRQt3vWrVqvbmm2/asmXLgq0nw4cPtyeffNJtJ1OmTLHy5cvbjBkzrE2bNrEuEgAAyO8tKI0aNbK5c+faxo0b3fU1a9bYokWL7JprrnHXt2zZYjt37nTdOp6SJUtagwYNLDExMepjHjp0yJKSksIuAAAg74p5C8rjjz/uAkSNGjXshBNOcGNSBgwYYG3btnX3K5yIWkxC6bp3X6RBgwZZv379Yl1UAACQX1pQ3nnnHZs6daq98cYbtmrVKps8ebI9//zz7mdm9erVy/bu3Ru8bN++PaZlBgAAebwFpUePHq4VxRtLUqtWLdu6datrBWnXrp1VqFDB3b5r1y43i8ej6xdeeGHUx4yPj3cXAACQP8S8BeXAgQNWsGD4w6qr59ixY+53TT9WSNE4FY+6hDSbJyEhIdbFAQAAuVDMW1BatmzpxpxUqVLFzjvvPPvqq69s6NChdvfdd7v7CxQoYN26dbNnnnnGqlev7gKLzptSsWJFu+GGG2JdHAAAkAvFPKDofCcKHJ07d7bdu3e74HHvvfe6E7N5evbsafv377dOnTrZnj17rEmTJjZ79mwrUqRIrIsDAAByoZgHlOLFi7vznOiSErWiPP300+4CAAAQibV4AACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAAC
A7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAABA/ggoO3bssNtvv93KlCljRYsWtVq1atmKFSuC9wcCAevTp4+deuqp7v5mzZrZpk2bsqMoAAAgF4p5QPnjjz+scePGVqhQIfv4449t3bp19sILL1ipUqWC2wwZMsRGjBhhY8eOtaVLl1qxYsWsefPmdvDgwVgXBwAA5EJxsX7AZ5991ipXrmwTJ04M3latWrWw1pPhw4fbk08+aa1atXK3TZkyxcqXL28zZsywNm3axLpIAAAgv7egzJw50+rVq2c333yzlStXzi666CIbP3588P4tW7bYzp07XbeOp2TJktagQQNLTEyM+piHDh2ypKSksAsAAMi7Yh5QfvjhBxszZoxVr17dPvnkE7v//vvtwQcftMmTJ7v7FU5ELSahdN27L9KgQYNciPEuaqEBAAB5V8wDyrFjx6xOnTo2cOBA13rSqVMn69ixoxtvklm9evWyvXv3Bi/bt2+PaZkBAEAeDyiamXPuueeG3VazZk3btm2b+71ChQru565du8K20XXvvkjx8fFWokSJsAsAAMi7Yh5QNINnw4YNYbdt3LjRTj/99OCAWQWRuXPnBu/XmBLN5klISIh1cQAAQC4U81k83bt3t0aNGrkunltuucWWLVtm48aNcxcpUKCAdevWzZ555hk3TkWBpXfv3laxYkW74YYbYl0cAACQC8U8oNSvX9+mT5/uxo08/fTTLoBoWnHbtm2D2/Ts2dP279/vxqfs2bPHmjRpYrNnz7YiRYrEujgAACAXinlAkeuuu85dUqJWFIUXXQAAACKxFg8AAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAMh/AWXw4MFWoEAB69atW/C2gwcPWpcuXaxMmTJ20kknWevWrW3Xrl3ZXRQAAJBLZGtAWb58ub388st2wQUXhN3evXt3+/DDD23atGm2YMEC++mnn+ymm27KzqIAAIBcJNsCyr59+6xt27Y2fvx4K1WqVPD2vXv32oQJE2zo0KF2xRVXWN26dW3ixIm2ePFiW7JkSXYVBwAA5CLZFlDUhdOiRQtr1qxZ2O0rV660I0eOhN1eo0YNq1KliiUmJkZ9rEOHDllSUlLYBQAA5F1x2fGgb731lq1atcp18UTauXOnFS5c2E4++eSw28uXL+/ui2bQoEHWr1+/7CgqAADIDy0o27dvt4ceesimTp1qRYoUiclj9urVy3UNeRc9BwAAyLtiHlDUhbN7926rU6eOxcXFuYsGwo4YMcL9rpaSw4cP2549e8L+TrN4KlSoEPUx4+PjrUSJEmEXAACQd8W8i6dp06b2zTffhN3Wvn17N87kscces8qVK1uhQoVs7ty5bnqxbNiwwbZt22YJCQmxLg4AAMiFYh5Qihcvbueff37YbcWKFXPnPPFu79Chgz388MNWunRp1xrywAMPuHDSsGHDWBcHAADkQtkySDYtw4YNs4IFC7oWFM3Qad68uY0ePTonigIAAPJrQPn888/Drmvw7KhRo9wFAAAgEmvxAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yG
gAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAACAvB9QBg0aZPXr17fixYtbuXLl7IYbbrANGzaEbXPw4EHr0qWLlSlTxk466SRr3bq17dq1K9ZFAQAAuVTMA8qCBQtc+FiyZInNmTPHjhw5YldddZXt378/uE337t3tww8/tGnTprntf/rpJ7vppptiXRQAAJBLxcX6AWfPnh12fdKkSa4lZeXKlXbJJZfY3r17bcKECfbGG2/YFVdc4baZOHGi1axZ04Wahg0bxrpIAAAgl8n2MSgKJFK6dGn3U0FFrSrNmjULblOjRg2rUqWKJSYmZndxAABAfmxBCXXs2DHr1q2bNW7c2M4//3x3286dO61w4cJ28sknh21bvnx5d180hw4dchdPUlJSdhYbAADk5RYUjUVZu3atvfXWW1keeFuyZMngpXLlyjErIwAAyEcBpWvXrjZr1iybP3++VapUKXh7hQoV7PDhw7Znz56w7TWLR/dF06tXL9dV5F22b9+eXcUGAAB5MaAEAgEXTqZPn27z5s2zatWqhd1ft25dK1SokM2dOzd4m6Yhb9u2zRISEqI+Znx8vJUoUSLsAgAA8q647OjW0QydDz74wJ0LxRtXoq6ZokWLup8dOnSwhx9+2A2cVdh44IEHXDhhBg8AAMiWgDJmzBj387LLLgu7XVOJ77rrLvf7sGHDrGDBgu4EbRr82rx5cxs9ejTvCAAAyJ6Aoi6etBQpUsRGjRrlLgAAAJFYiwcAAPgOAQUAAPgOAQUAAPgOAQUAAOSvU93j+Ej8/rd0bNPf/CLhzDJpbtP5ws7HpSwAAH+iBQUAAPgOAQUAAPgOAQUAAPgOAQUAAPgOg2TzmEpJK9PcpvaiX5PdlhR/app/t7DpBZkuFwAAGUELCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8J24nC4AkCvMH5TmJr/MWp38xqr/SPPvTnmga2ZLBQB5Fi0oAADAdwgoAADAdwgoAADAdwgoAADAdxgkC6Rh2JyN1nDbb6luM7PgZqu959dktyetX5Dm4y987w+LpYQzy6S5TecLO8f0OYH8/h2RllVJb5ufJOSC74kcbUEZNWqUVa1a1YoUKWINGjSwZcuW5WRxAACAT+RYQHn77bft4Ycftr59+9qqVausdu3a1rx5c9u9e3dOFQkAAOT3gDJ06FDr2LGjtW/f3s4991wbO3asnXjiifbqq6/mVJEAAEB+HoNy+PBhW7lypfXq1St4W8GCBa1Zs2aWmJiYbPtDhw65i2fv3r3uZ1JSUraU7+D+fWHX9/91KHmZCh4Ju37g8NFk2/xVIHybaA4fOGixdOivtJ8zp8v6176/0twmu97bzH4eon0GIj8POb1fc+v+BXK7yDrjePw/z63fE95jBgKBtDcO5IAdO3aoZIHFixeH3d6jR4/AxRdfnGz7vn37uu25cOHChQsXLpbrL9u3b08zK+SKWTxqadF4Fc+xY8fs999/tzJlyliBAgWylOQqV65s27dvtxIlSsSotAjFPj4+2M/Zj32c/djHeX8/BwIB+/PPP61ixYppbpsjAaVs2bJ2wgkn2K5du8Ju1/UKFSok2z4+Pt5dQp188skxK4/eIP4zZC/
28fHBfs5+7OPsxz4+PnJqP5csWdK/g2QLFy5sdevWtblz54a1iuh6QkJCThQJAAD4SI518ajLpl27dlavXj27+OKLbfjw4bZ//343qwcAAORvORZQbr31Vvvll1+sT58+tnPnTrvwwgtt9uzZVr58+eNWBnUb6Twskd1HiB328fHBfs5+7OPsxz4+PuJzyX4uoJGyOV0IAACAUCwWCAAAfIeAAgAAfIeAAgAAfIeAAgAAfCdfB5RRo0ZZ1apVrUiRItagQQNbtmxZThcp1xo0aJDVr1/fihcvbuXKlbMbbrjBNmzYELbNwYMHrUuXLu4MwCeddJK1bt062cn6kH6DBw92Z1Lu1q1b8Db2cdbt2LHDbr/9drcPixYtarVq1bIVK1YE79e8As0+PPXUU939WkNs06ZNOVrm3Obvv/+23r17W7Vq1dw+PPPMM61///5h67OwnzPmiy++sJYtW7oztOp7YcaMGWH3p2d/6gztbdu2dSdv08lQO3ToYPv2pb3OULYJ5FNvvfVWoHDhwoFXX3018O233wY6duwYOPnkkwO7du3K6aLlSs2bNw9MnDgxsHbt2sDq1asD1157baBKlSqBffv2Bbe57777ApUrVw7MnTs3sGLFikDDhg0DjRo1ytFy51bLli0LVK1aNXDBBRcEHnrooeDt7OOs+f333wOnn3564K677gosXbo08MMPPwQ++eSTwObNm4PbDB48OFCyZMnAjBkzAmvWrAlcf/31gWrVqgX++uuvHC17bjJgwIBAmTJlArNmzQps2bIlMG3atMBJJ50UePHFF4PbsJ8z5qOPPgo88cQTgffff9+tdTN9+vSw+9OzP6+++upA7dq1A0uWLAksXLgwcNZZZwVuu+22QE7JtwFFixJ26dIleP3vv/8OVKxYMTBo0KAcLVdesXv3bvefZMGCBe76nj17AoUKFXJfRJ7169e7bRITE3OwpLnPn3/+GahevXpgzpw5gUsvvTQYUNjHWffYY48FmjRpkuL9x44dC1SoUCHw3HPPBW/Tfo+Pjw+8+eabx6mUuV+LFi0Cd999d9htN910U6Bt27bud/Zz1kQGlPTsz3Xr1rm/W758eXCbjz/+OFCgQAG3wG9OyJddPIcPH7aVK1e6Ji5PwYIF3fXExMQcLVtesXfvXvezdOnS7qf295EjR8L2eY0aNaxKlSrs8wxSF06LFi3C9qWwj7Nu5syZ7uzWN998s+uqvOiii2z8+PHB+7ds2eJOLBm6j7WuiLqI2cfp16hRI7e0ycaNG931NWvW2KJFi+yaa65x19nPsZWe/amf6tbR59+j7VU3Ll26NEfKnStWM461X3/91fWBRp61Vte/++67HCtXXqF1lTQuonHjxnb++ee72/SfQ2swRS7yqH2u+5A+b731lq1atcqWL1+e7D72cdb98MMPNmbMGLcUx7///W+3nx988EG3X7U0h7cfo313sI/T7/HHH3cr6ipAa+FYfR8PGDDAjX8Q9nNspWd/6qdCeai4uDh3kJlT+zxfBhRk/xH+2rVr3RERYkdLoz/00EM2Z84cN7Ab2ROudQQ5cOBAd10tKPosjx071gUUxMY777xjU6dOtTfeeMPOO+88W716tTuo0QBP9jM8+bKLp2zZsi61R85u0PUKFSrkWLnygq5du9qsWbNs/vz5VqlSpeDt2q/qWtuzZ0/Y9uzz9FMXzu7du61OnTruyEaXBQsW2IgRI9zvOhpiH2eNZjice+65YbfVrFnTtm3b5n739iPfHVnTo0cP14rSpk0bN0vqjjvusO7du7vZgMJ+jq307E/91PdLqKNHj7qZPTm1z/NlQFFzbd26dV0faOiRk64nJCTkaNlyK43LUjiZPn26zZs3z00fDKX9XahQobB9rmnI+uJnn6dP06ZN7ZtvvnFHm95FR/tqFvd+Zx9njbolI6fHa5zE6aef7n7X51pf1qH7WF0V6qNnH6ffgQMH3NiGUDpo1PewsJ9jKz37Uz91cKM
DIY++y/WeaKxKjgjk42nGGsE8adIkN3q5U6dObprxzp07c7poudL999/vprB9/vnngZ9//jl4OXDgQNgUWE09njdvnpsCm5CQ4C7IvNBZPMI+zvr07bi4ODcNdtOmTYGpU6cGTjzxxMDrr78eNl1T3xUffPBB4Ouvvw60atWK6a8Z1K5du8Bpp50WnGasqbFly5YN9OzZM7gN+znjs/u++uord1HVPnToUPf71q1b070/Nc34oosuclPsFy1a5GYLMs04h4wcOdJ9met8KJp2rLnfyBz9h4h20blRPPqP0Llz50CpUqXcl/6NN97oQgxiF1DYx1n34YcfBs4//3x3AFOjRo3AuHHjwu7XlM3evXsHypcv77Zp2rRpYMOGDTlW3twoKSnJfW71/VukSJHAGWec4c7hcejQoeA27OeMmT9/ftTvYIXB9O7P3377zQUSnZOmRIkSgfbt27vgk1MK6J+cabsBAACILl+OQQEAAP5GQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAOY3/w9bLw3+tnymjQAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import neps\n", + "from neps import algorithms\n", + "from functools import partial\n", + "import matplotlib.pyplot as plt\n", + "global_values = []\n", + "eta=3\n", + "for algo in [partial(algorithms.neps_hyperband, eta=eta), \n", + " partial(algorithms.hyperband, eta=eta), \n", + " partial(algorithms.neps_hyperband, sampler=\"prior\", eta=eta), \n", + " partial(algorithms.hyperband, sampler=\"prior\", eta=eta)]:\n", + " neps.run(\n", + " evaluate_pipeline,\n", + " SimpleSpace(),\n", + " root_directory=\"neps_test_runs/algo_tests\",\n", + " overwrite_root_directory=True,\n", + " optimizer=algo,\n", + " fidelities_to_spend=600\n", + " )\n", + "\n", + "plt.hist([v for n,v in enumerate(global_values) if n % 4 == 0], alpha=0.5, label='Neps HB with uniform sampler',bins=10)\n", + "plt.hist([v+1 for n,v in enumerate(global_values) if n % 4 == 1], alpha=0.5, label='HB with uniform sampler',bins=10)\n", + "plt.hist([v+2 for n,v in enumerate(global_values) if n % 4 == 2], alpha=0.5, label='Neps HB with prior sampler',bins=10)\n", + "plt.hist([v+3 for n,v in enumerate(global_values) if n % 4 == 3], alpha=0.5, label='HB with prior sampler',bins=10)\n", + "plt.legend()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "70b97bfb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Count of 1 in algo 0: 82\n", + "Count of 3 in algo 0: 52\n", + "Count of 11 in algo 0: 16\n", + "Count of 33 in algo 0: 2\n", + "Count of 1 in algo 1: 80\n", + "Count of 3 in algo 1: 56\n", + "Count of 11 in algo 1: 14\n", + "Count of 33 in algo 1: 4\n", + "Count of 1 in algo 2: 82\n", + "Count of 3 in algo 2: 52\n", + "Count of 11 in algo 2: 16\n", + "Count of 33 in algo 2: 2\n", + "Count of 1 in algo 3: 80\n", + "Count of 3 in algo 3: 56\n", + "Count of 11 in algo 3: 14\n", + "Count of 33 in algo 3: 4\n" + ] + } + ], + 
"source": [ + "for i in range(4):\n", + " for j in [v for v in range(100) if v in global_values]:\n", + " print(f\"Count of {j:<3} in algo {i}: \", [v for n,v in enumerate(global_values) if n % 4 == i].count(j))\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "neural-pipeline-search (3.13.1)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/neps_examples/basic_usage/priors_test.ipynb b/neps_examples/basic_usage/priors_test.ipynb index f85ebf414..7a4dd4b9d 100644 --- a/neps_examples/basic_usage/priors_test.ipynb +++ b/neps_examples/basic_usage/priors_test.ipynb @@ -17,37 +17,23 @@ "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", "\n", "==================================================\n", - "After adding new float:\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param1 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.LOW)\n", - "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tparam_4 = Float(0.0, 1.0)\n", - "\n", - "==================================================\n", - "After removing 'int_param1':\n", + "After removing 'int_param1' (in-place):\n", "PipelineSpace SimpleSpace with parameters:\n", "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tparam_4 = Float(0.0, 1.0)\n", "\n", "==================================================\n", - "After adding 'int_param1' twice and once with different upper:\n", - "Error 
occurred: A different parameter with the name 'int_param1' already exists in the pipeline:\n", - " Float(0.0, 1.0)\n", - " Float(0.0, 2.0)\n", + "After adding 'int_param1' (in-place):\n", "PipelineSpace SimpleSpace with parameters:\n", "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tparam_4 = Float(0.0, 1.0)\n", - "\tint_param1 = Float(0.0, 1.0)\n", + "\tint_param1 = Float(0.0, 1.0, prior=, prior_confidence=)\n", "\n", "==================================================\n", - "After removing 'int_param1':\n", + "After removing 'int_param1' (in-place):\n", "PipelineSpace SimpleSpace with parameters:\n", "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tparam_4 = Float(0.0, 1.0)\n" + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n" ] } ], @@ -67,34 +53,24 @@ "class OtherSpace(PipelineSpace):\n", " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\", log=False)\n", "\n", - "# Test operations\n", + "# Test in-place operations\n", "pipeline = SimpleSpace()\n", "print(\"Original pipeline:\")\n", "print(pipeline)\n", "\n", "print(\"\\n\" + \"=\"*50)\n", - "print(\"After adding new float:\")\n", - "pipeline=pipeline+neps.Float(0.0, 1.0) \n", - "print(pipeline)\n", - "\n", - "print(\"\\n\" + \"=\"*50)\n", - "print(\"After removing 'int_param1':\")\n", - "pipeline=pipeline.remove(\"int_param1\") \n", + "print(\"After removing 'int_param1' (in-place):\")\n", + "pipeline.remove(\"int_param1\") # This modifies pipeline in-place\n", "print(pipeline)\n", "\n", "print(\"\\n\" + \"=\"*50)\n", - "print(\"After adding 'int_param1' twice and once with different upper:\")\n", - "pipeline=pipeline.add(neps.Float(0.0, 1.0), \"int_param1\")\n", - 
"pipeline=pipeline.add(neps.Float(0.0, 1.0), \"int_param1\")\n", - "try:\n", - " pipeline=pipeline.add(neps.Float(0.0, 2.0), \"int_param1\")\n", - "except ValueError as e:\n", - " print(f\"Error occurred: {e}\")\n", + "print(\"After adding 'int_param1' (in-place):\")\n", + "pipeline.add(neps.Float(0.0, 1.0), \"int_param1\") # This also modifies in-place\n", "print(pipeline)\n", "\n", "print(\"\\n\" + \"=\"*50)\n", - "print(\"After removing 'int_param1':\")\n", - "pipeline=pipeline.remove(\"int_param1\")\n", + "print(\"After removing 'int_param1' (in-place):\")\n", + "pipeline.remove(\"int_param1\") # This modifies pipeline in-place\n", "print(pipeline)" ] }, From db37873b6eff48ad8581784b47e2061620dcda79 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sat, 11 Oct 2025 19:15:54 +0200 Subject: [PATCH 076/156] Introduce NePS-space compatible Grid Search and Hyperband variants - with pre-commit changes --- neps/api.py | 8 +- neps/optimizers/algorithms.py | 152 ++++++++++++-- neps/optimizers/neps_bracket_optimizer.py | 13 +- neps/optimizers/utils/grid.py | 124 +++++++++--- neps/runtime.py | 23 ++- neps/space/neps_spaces/neps_space.py | 92 +++++---- neps/space/neps_spaces/parameters.py | 53 ++++- neps_examples/basic_usage/algo_tests.ipynb | 212 ++++++++++++++++++++ neps_examples/basic_usage/priors_test.ipynb | 48 ++--- 9 files changed, 583 insertions(+), 142 deletions(-) create mode 100644 neps_examples/basic_usage/algo_tests.ipynb diff --git a/neps/api.py b/neps/api.py index 51e563a29..4b7d4fefc 100644 --- a/neps/api.py +++ b/neps/api.py @@ -13,8 +13,6 @@ from typing import TYPE_CHECKING, Any, Concatenate, Literal import neps -import neps.optimizers.algorithms -import neps.optimizers.neps_bracket_optimizer from neps.optimizers import AskFunction, OptimizerChoice, load_optimizer from neps.optimizers.ask_and_tell import AskAndTell from neps.runtime import _launch_runtime, _save_results @@ -438,7 +436,6 @@ def __call__( converted_space = 
convert_neps_to_classic_search_space(pipeline_space) if converted_space: pipeline_space = converted_space - space = convert_to_space(pipeline_space) if neps_classic_space_compatibility == "neps" and not isinstance( @@ -473,6 +470,11 @@ def __call__( "moasha", "mo_hyperband", "primo", + "neps_priorband", + "neps_random_search", + "complex_random_search", + "neps_bracket_optimizer", + "neps_hyperband", } is_multi_fidelity = _optimizer_info["name"] in multi_fidelity_optimizers diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index f10cb878e..5eee5e692 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -171,10 +171,12 @@ def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 *, bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], eta: int, - sampler: Literal["uniform", "prior", "priorband", "mopriorsampler"] - | PriorBandSampler - | MOPriorSampler - | Sampler, + sampler: ( + Literal["uniform", "prior", "priorband", "mopriorsampler"] + | PriorBandSampler + | MOPriorSampler + | Sampler + ), bayesian_optimization_kick_in_point: int | float | None, sample_prior_first: bool | Literal["highest_fidelity"], # NOTE: This is the only argument to get a default, since it @@ -468,7 +470,9 @@ def random_search( pipeline_space: The search space to sample from. use_priors: Whether to use priors when sampling. ignore_fidelity: Whether to ignore fidelity when sampling. - In this case, the max fidelity is always used. + Setting this to "highest fidelity" will always sample at max fidelity. + Setting this to True will randomly sample from the fidelity like any other + parameter. 
""" if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) @@ -535,7 +539,8 @@ def random_search( def grid_search( pipeline_space: SearchSpace | PipelineSpace, *, - ignore_fidelity: bool = False, + ignore_fidelity: bool | Literal["highest fidelity"] = False, + size_per_numerical_dimension: int = 5, ) -> GridSearch: """A simple grid search algorithm which discretizes the search space and evaluates all possible configurations. @@ -543,7 +548,11 @@ def grid_search( Args: pipeline_space: The search space to sample from. ignore_fidelity: Whether to ignore fidelity when sampling. - In this case, the max fidelity is always used. + Setting this to "highest fidelity" will always sample at max fidelity. + Setting this to True will make a grid over the fidelity like any other + parameter. + size_per_numerical_dimension: The number of points to use per numerical + dimension when discretizing the space. """ from neps.optimizers.utils.grid import make_grid @@ -552,23 +561,95 @@ def grid_search( if converted_space is not None: pipeline_space = converted_space else: - raise ValueError( - "This optimizer only supports HPO search spaces, please use a NePS" - " space-compatible optimizer." + return neps_grid_search( + pipeline_space, + ignore_fidelity=ignore_fidelity, + size_per_numerical_dimension=size_per_numerical_dimension, ) if any( parameter.prior is not None for parameter in pipeline_space.searchables.values() ): - raise ValueError("Grid search does not support priors.") + logger.warning("Grid search does not support priors, they will be ignored.") if ignore_fidelity and pipeline_space.fidelity is None: logger.warning( "Warning: You are using ignore_fidelity, but no fidelity is defined in the" " search space. Consider setting ignore_fidelity to False." ) + if not ignore_fidelity and pipeline_space.fidelity is not None: + raise ValueError( + "Fidelities are not supported for GridSearch natively. 
Consider setting the" + " fidelity to a constant value, or setting ignore_fidelity to True to sample" + " from it like any other parameter or 'highest fidelity' to always sample at" + f" max fidelity. Got fidelity: {pipeline_space.fidelities} " + ) return GridSearch( - configs_list=make_grid(pipeline_space, ignore_fidelity=ignore_fidelity) + configs_list=make_grid( + pipeline_space, + ignore_fidelity=ignore_fidelity, + size_per_numerical_hp=size_per_numerical_dimension, + ) + ) + + +def neps_grid_search( + pipeline_space: PipelineSpace, + *, + ignore_fidelity: bool | Literal["highest fidelity"] = False, + size_per_numerical_dimension: int = 5, +) -> GridSearch: + """A simple grid search algorithm which discretizes the search + space and evaluates all possible configurations. + + Args: + pipeline_space: The search space to sample from. + ignore_fidelity: Whether to ignore fidelity when sampling. + Setting this to "highest fidelity" will always sample at max fidelity. + Setting this to True will make a grid over the fidelity like any other + parameter. + size_per_numerical_dimension: The number of points to use per numerical + dimension when discretizing the space. + """ + from neps.optimizers.utils.grid import make_grid + + if not isinstance(pipeline_space, PipelineSpace): + raise ValueError( + "This optimizer only supports NePS spaces, please use a classic" + " search space-compatible optimizer." 
+ ) + parameters = pipeline_space.get_attrs().values() + non_fid_parameters = [ + parameter + for parameter in parameters + if parameter not in pipeline_space.fidelity_attrs.values() + ] + if any( + parameter.has_prior # type: ignore + for parameter in non_fid_parameters + if isinstance(parameter, Resolvable) + and isinstance(parameter, Integer | Float | Categorical) + ): + logger.warning("Grid search does not support priors, they will be ignored.") + if not pipeline_space.fidelity_attrs and ignore_fidelity: + logger.warning( + "Warning: You are using ignore_fidelity, but no fidelity is defined in the" + " search space. Consider setting ignore_fidelity to False." + ) + if pipeline_space.fidelity_attrs and not ignore_fidelity: + raise ValueError( + "Fidelities are not supported for GridSearch natively. Consider setting the" + " fidelity to a constant value, or setting ignore_fidelity to True to sample" + " from it like any other parameter or 'highest fidelity' to always sample at" + f" max fidelity. 
Got fidelity: {pipeline_space.fidelity_attrs} " + ) + + return GridSearch( + configs_list=make_grid( + pipeline_space, + ignore_fidelity=ignore_fidelity, + size_per_numerical_hp=size_per_numerical_dimension, + ) ) @@ -797,7 +878,7 @@ def hyperband( eta: int = 3, sampler: Literal["uniform", "prior"] = "uniform", sample_prior_first: bool | Literal["highest_fidelity"] = False, -) -> BracketOptimizer: +) -> BracketOptimizer | _NePSBracketOptimizer: """Another bandit-based optimization algorithm that uses a _fidelity_ parameter, very similar to [`successive_halving`][neps.optimizers.algorithms.successive_halving], but hedges a bit more on the safe side, just incase your _fidelity_ parameters @@ -844,12 +925,14 @@ def hyperband( """ if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) - if converted_space is not None: + if converted_space: pipeline_space = converted_space else: - raise ValueError( - "This optimizer only supports HPO search spaces, please use a NePS" - " space-compatible optimizer." + return neps_hyperband( + pipeline_space, + eta=eta, + sampler=sampler, + sample_prior_first=sample_prior_first, ) return _bracket_optimizer( pipeline_space=pipeline_space, @@ -864,6 +947,39 @@ def hyperband( ) +def neps_hyperband( + pipeline_space: PipelineSpace, + *, + eta: int = 3, + sampler: Literal["uniform", "prior"] = "uniform", + sample_prior_first: bool | Literal["highest_fidelity"] = False, +) -> _NePSBracketOptimizer: + """ + Hyperband optimizer for NePS search spaces. + Args: + pipeline_space: The search space to sample from. + eta: The reduction factor used for building brackets + sampler: The type of sampling procedure to use: + + * If `#!python "uniform"`, samples uniformly from the space when + it needs to sample. + * If `#!python "prior"`, samples from the prior + distribution built from the `prior` and `prior_confidence` + values in the search space. 
+ + sample_prior_first: Whether to sample the prior configuration first, + and if so, should it be at the highest fidelity level. + """ + return _neps_bracket_optimizer( + pipeline_space=pipeline_space, + bracket_type="hyperband", + eta=eta, + sampler="prior" if sampler == "prior" else "uniform", + sample_prior_first=sample_prior_first, + early_stopping_rate=None, + ) + + def mo_hyperband( pipeline_space: SearchSpace | PipelineSpace, *, @@ -1716,6 +1832,7 @@ def neps_priorband( neps_random_search, complex_random_search, neps_priorband, + neps_hyperband, ) } @@ -1736,4 +1853,5 @@ def neps_priorband( "neps_random_search", "complex_random_search", "neps_priorband", + "neps_hyperband", ] diff --git a/neps/optimizers/neps_bracket_optimizer.py b/neps/optimizers/neps_bracket_optimizer.py index e184143b4..e2d07f763 100644 --- a/neps/optimizers/neps_bracket_optimizer.py +++ b/neps/optimizers/neps_bracket_optimizer.py @@ -61,7 +61,7 @@ class _NePSBracketOptimizer: """The sampler used to generate new trials.""" sampler: NePSPriorBandSampler | DomainSampler - def __call__( # noqa: C901 + def __call__( # noqa: C901, PLR0912 self, trials: Mapping[str, Trial], budget_info: BudgetInfo | None, @@ -137,10 +137,17 @@ def __call__( # noqa: C901 if isinstance(self.sampler, NePSPriorBandSampler): config = self.sampler.sample_config(table, rung=rung) elif isinstance(self.sampler, DomainSampler): + environment_values = {} + fidelity_attrs = self.space.fidelity_attrs + assert len(fidelity_attrs) == 1, "TODO: [lum]" + for fidelity_name, _fidelity_obj in fidelity_attrs.items(): + environment_values[fidelity_name] = self.rung_to_fid[rung] _, resolution_context = neps_space.resolve( - self.space, domain_sampler=self.sampler + self.space, + domain_sampler=self.sampler, + environment_values=environment_values, ) - config = neps_space.NepsCompatConverter.to_neps_config( + config = neps_space.NepsCompatConverter.to_neps_config( # type: ignore[assignment] resolution_context ) config = 
dict(**config) diff --git a/neps/optimizers/utils/grid.py b/neps/optimizers/utils/grid.py index aa152c66c..e3b95f98a 100644 --- a/neps/optimizers/utils/grid.py +++ b/neps/optimizers/utils/grid.py @@ -1,10 +1,11 @@ from __future__ import annotations from itertools import product -from typing import Any +from typing import Any, Literal import torch +from neps import Categorical, Fidelity, Float, Integer, PipelineSpace from neps.space import ( Domain, HPOCategorical, @@ -13,13 +14,15 @@ HPOInteger, SearchSpace, ) +from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces.sampling import RandomSampler -def make_grid( - space: SearchSpace, +def make_grid( # noqa: PLR0912, PLR0915, C901 + space: SearchSpace | PipelineSpace, *, size_per_numerical_hp: int = 10, - ignore_fidelity: bool = True, + ignore_fidelity: bool | Literal["highest fidelity"] = False, ) -> list[dict[str, Any]]: """Get a grid of configurations from the search space. @@ -39,29 +42,102 @@ def make_grid( A list of configurations from the search space. """ param_ranges: dict[str, list[Any]] = {} - for name, hp in space.items(): - match hp: - case HPOCategorical(): - param_ranges[name] = list(hp.choices) - case HPOConstant(): - param_ranges[name] = [hp.value] - case HPOInteger() | HPOFloat(): - if hp.is_fidelity and ignore_fidelity: - param_ranges[name] = [hp.upper] - continue + if isinstance(space, SearchSpace): + for name, hp in space.items(): + match hp: + case HPOCategorical(): + param_ranges[name] = list(hp.choices) + case HPOConstant(): + param_ranges[name] = [hp.value] + case HPOInteger() | HPOFloat(): + if hp.is_fidelity: + match ignore_fidelity: + case "highest fidelity": + param_ranges[name] = [hp.upper] + continue + case True: + param_ranges[name] = [hp.lower, hp.upper] + case False: + raise ValueError( + "Grid search does not support fidelity " + "natively. Please use the" + "ignore_fidelity parameter." 
+ ) + if hp.domain.cardinality is None: + steps = size_per_numerical_hp + else: + steps = min(size_per_numerical_hp, hp.domain.cardinality) - if hp.domain.cardinality is None: - steps = size_per_numerical_hp + xs = torch.linspace(0, 1, steps=steps) + numeric_values = hp.domain.cast(xs, frm=Domain.unit_float()) + uniq_values = torch.unique(numeric_values).tolist() + param_ranges[name] = uniq_values + case _: + raise NotImplementedError(f"Unknown Parameter type: {type(hp)}\n{hp}") + keys = list(space.keys()) + values = product(*param_ranges.values()) + return [dict(zip(keys, p, strict=False)) for p in values] + if isinstance(space, PipelineSpace): + fid_ranges: dict[str, list[float]] = {} + for name, hp in space.get_attrs().items(): + if isinstance(hp, Categorical): + if isinstance(hp.choices, tuple): # type: ignore[unreachable] + param_ranges[name] = list(range(len(hp.choices))) else: - steps = min(size_per_numerical_hp, hp.domain.cardinality) - + raise NotImplementedError( + "Grid search only supports categorical choices as tuples." + ) + elif isinstance(hp, Fidelity): + if ignore_fidelity == "highest fidelity": # type: ignore[unreachable] + fid_ranges[name] = [hp.max_value] + continue + if ignore_fidelity is True: + fid_ranges[name] = [hp.min_value, hp.max_value] + continue + raise ValueError( + "Grid search does not support fidelity natively." + " Please use the ignore_fidelity parameter." 
+ ) + elif isinstance(hp, Integer | Float): + steps = size_per_numerical_hp # type: ignore[unreachable] xs = torch.linspace(0, 1, steps=steps) - numeric_values = hp.domain.cast(xs, frm=Domain.unit_float()) + numeric_values = xs * (hp.max_value - hp.min_value) + hp.min_value + if isinstance(hp, Integer): + numeric_values = torch.round(numeric_values) uniq_values = torch.unique(numeric_values).tolist() param_ranges[name] = uniq_values - case _: - raise NotImplementedError(f"Unknown Parameter type: {type(hp)}\n{hp}") - values = product(*param_ranges.values()) - keys = list(space.keys()) + else: + raise NotImplementedError( + f"Parameter type: {type(hp)}\n{hp} not supported yet in GridSearch" + ) + keys = list(param_ranges.keys()) + values = product(*param_ranges.values()) + config_dicts = [dict(zip(keys, p, strict=False)) for p in values] + keys_fid = list(fid_ranges.keys()) + values_fid = product(*fid_ranges.values()) + fid_dicts = [dict(zip(keys_fid, p, strict=False)) for p in values_fid] + configs = [] + random_config = neps_space.NepsCompatConverter.to_neps_config( + neps_space.resolve( + pipeline=space, + domain_sampler=RandomSampler(predefined_samplings={}), + environment_values=fid_dicts[0], + )[1] + ) + + for config_dict in config_dicts: + for fid_dict in fid_dicts: + new_config = {} + for param in random_config: + for key in config_dict: + if key in param: + new_config[param] = config_dict[key] + for key in fid_dict: + if key in param: + new_config[param] = fid_dict[key] + configs.append(new_config) + return configs - return [dict(zip(keys, p, strict=False)) for p in values] + raise TypeError( + f"Unsupported space type: {type(space)}" + ) # More informative than None diff --git a/neps/runtime.py b/neps/runtime.py index a57c0a334..99b06be45 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -35,6 +35,7 @@ WorkerFailedToGetPendingTrialsError, WorkerRaiseError, ) +from neps.space.neps_spaces.neps_space import NepsCompatConverter, PipelineSpace from 
neps.state import ( BudgetInfo, DefaultReportValues, @@ -324,7 +325,7 @@ def _check_shared_error_stopping_criterion(self) -> str | Literal[False]: return False - def _check_global_stopping_criterion( + def _check_global_stopping_criterion( # noqa: C901 self, trials: Mapping[str, Trial], ) -> str | Literal[False]: @@ -351,7 +352,13 @@ def _check_global_stopping_criterion( if self.settings.fidelities_to_spend is not None and hasattr( self.optimizer, "space" ): - fidelity_name = next(iter(self.optimizer.space.fidelities.keys())) + if not isinstance(self.optimizer.space, PipelineSpace): + fidelity_name = next(iter(self.optimizer.space.fidelities.keys())) + else: + fidelity_name = next(iter(self.optimizer.space.fidelity_attrs.keys())) + fidelity_name = ( + f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" + ) count = sum( trial.config[fidelity_name] for _, trial in trials.items() @@ -745,7 +752,7 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 best_config = self.state.all_best_configs[-1] # Latest best best_config_text = ( - f"# Best config:" + "# Best config:" f"\n\n Config ID: {best_config['trial_id']}" f"\n Objective to minimize: {best_config['score']}" f"\n Config: {best_config['config']}" @@ -813,7 +820,7 @@ def load_incumbent_trace( # noqa: D103 if state.all_best_configs: best_config = state.all_best_configs[-1] best_config_text = ( - f"# Best config:" + "# Best config:" f"\n\n Config ID: {best_config['trial_id']}" f"\n Objective to minimize: {best_config['score']}" f"\n Config: {best_config['config']}" @@ -859,9 +866,11 @@ def _save_results( raise RuntimeError(f"Trial '{trial_id}' not found in '{root_directory}'") report = trial.set_complete( - report_as=Trial.State.SUCCESS.value - if result.exception is None - else Trial.State.CRASHED.value, + report_as=( + Trial.State.SUCCESS.value + if result.exception is None + else Trial.State.CRASHED.value + ), objective_to_minimize=result.objective_to_minimize, cost=result.cost, 
learning_curve=result.learning_curve, diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 4b8172810..9228c5a7c 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -1241,6 +1241,43 @@ class NEPSSpace(PipelineSpace): return NEPSSpace() +ONLY_NEPS_ALGORITHMS_NAMES = [ + "neps_random_search", + "neps_priorband", + "complex_random_search", + "neps_hyperband", + "complex_hyperband", +] +CLASSIC_AND_NEPS_ALGORITHMS_NAMES = [ + "random_search", + "priorband", + "hyperband", + "grid_search", +] + + +# Lazy initialization to avoid circular imports +def _get_only_neps_algorithms_functions() -> list[Callable]: + """Get the list of NEPS-only algorithm functions lazily.""" + return [ + algorithms.neps_random_search, + algorithms.neps_priorband, + algorithms.complex_random_search, + algorithms.neps_hyperband, + algorithms.neps_grid_search, + ] + + +def _get_classic_and_neps_algorithms_functions() -> list[Callable]: + """Get the list of classic and NEPS algorithm functions lazily.""" + return [ + algorithms.random_search, + algorithms.priorband, + algorithms.hyperband, + algorithms.grid_search, + ] + + def check_neps_space_compatibility( optimizer_to_check: ( algorithms.OptimizerChoice @@ -1281,38 +1318,15 @@ def check_neps_space_compatibility( inner_optimizer = inner_optimizer.func only_neps_algorithm = ( - optimizer_to_check - in ( - algorithms.neps_random_search, - algorithms.neps_priorband, - algorithms.complex_random_search, - ) - or ( - inner_optimizer - and inner_optimizer - in ( - algorithms.neps_random_search, - algorithms.neps_priorband, - algorithms.complex_random_search, - ) - ) + optimizer_to_check in _get_only_neps_algorithms_functions() + or (inner_optimizer and inner_optimizer in _get_only_neps_algorithms_functions()) or ( - optimizer_to_check[0] - in ( - "neps_random_search", - "neps_priorband", - "complex_random_search", - ) + optimizer_to_check[0] in ONLY_NEPS_ALGORITHMS_NAMES 
if isinstance(optimizer_to_check, tuple) else False ) or ( - optimizer_to_check - in ( - "neps_random_search", - "neps_priorband", - "complex_random_search", - ) + optimizer_to_check in ONLY_NEPS_ALGORITHMS_NAMES if isinstance(optimizer_to_check, str) else False ) @@ -1320,35 +1334,19 @@ def check_neps_space_compatibility( if only_neps_algorithm: return "neps" neps_and_classic_algorithm = ( - optimizer_to_check - in ( - algorithms.random_search, - algorithms.priorband, - ) + optimizer_to_check in _get_classic_and_neps_algorithms_functions() or ( inner_optimizer - and inner_optimizer - in ( - algorithms.random_search, - algorithms.priorband, - ) + and inner_optimizer in _get_classic_and_neps_algorithms_functions() ) or optimizer_to_check == "auto" or ( - optimizer_to_check[0] - in ( - "random_search", - "priorband", - ) + optimizer_to_check[0] in CLASSIC_AND_NEPS_ALGORITHMS_NAMES if isinstance(optimizer_to_check, tuple) else False ) or ( - optimizer_to_check - in ( - "random_search", - "priorband", - ) + optimizer_to_check in CLASSIC_AND_NEPS_ALGORITHMS_NAMES if isinstance(optimizer_to_check, str) else False ) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 53322fb3a..380a92ecf 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -89,9 +89,13 @@ def __str__(self) -> str: def __eq__(self, other: Fidelity | object) -> bool: if not isinstance(other, Fidelity): - raise ValueError("__eq__ only available to compare to Fidelity objects.") + return False return self._domain == other._domain + def __hash__(self) -> int: + """Get the hash of the fidelity based on its domain.""" + return hash(self._domain) + @property def min_value(self) -> int | float: """Get the minimum value of the fidelity domain. 
@@ -580,13 +584,28 @@ def __str__(self) -> str: def __eq__(self, other: Categorical | object) -> bool: if not isinstance(other, Categorical): - raise ValueError("__eq__ only available to compare to Categorical objects.") + return False return ( self.prior == other.prior and self.prior_confidence == other.prior_confidence and self.choices == other.choices ) + def __hash__(self) -> int: + """Get the hash of the categorical domain based on its attributes.""" + try: + choices_hash = hash(self.choices) + except TypeError: + # If choices are not hashable (e.g., contain mutable objects), use id + choices_hash = id(self.choices) + return hash( + ( + self._prior if self._prior is not _UNSET else None, + self._prior_confidence if self._prior_confidence is not _UNSET else None, + choices_hash, + ) + ) + @property def min_value(self) -> int: """Get the minimum value of the categorical domain. @@ -775,7 +794,7 @@ def __str__(self) -> str: def __eq__(self, other: Float | object) -> bool: if not isinstance(other, Float): - raise ValueError("__eq__ only available to compare to Float objects.") + return False return ( self._prior == other._prior and self._prior_confidence == other._prior_confidence @@ -784,6 +803,18 @@ def __eq__(self, other: Float | object) -> bool: and self._log == other._log ) + def __hash__(self) -> int: + """Get the hash of the float domain based on its attributes.""" + return hash( + ( + self._prior if self._prior is not _UNSET else None, + self._prior_confidence if self._prior_confidence is not _UNSET else None, + self.min_value, + self.max_value, + self._log, + ) + ) + @property def min_value(self) -> float: """Get the minimum value of the floating-point domain. 
@@ -970,7 +1001,7 @@ def __str__(self) -> str: def __eq__(self, other: Integer | object) -> bool: if not isinstance(other, Integer): - raise ValueError("__eq__ only available to compare to Integer objects.") + return False return ( self._prior == other._prior and self._prior_confidence == other._prior_confidence @@ -979,6 +1010,18 @@ def __eq__(self, other: Integer | object) -> bool: and self._log == other._log ) + def __hash__(self) -> int: + """Get the hash of the integer domain based on its attributes.""" + return hash( + ( + self._prior if self._prior is not _UNSET else None, + self._prior_confidence if self._prior_confidence is not _UNSET else None, + self.min_value, + self.max_value, + self._log, + ) + ) + @property def min_value(self) -> int: """Get the minimum value of the integer domain. @@ -1157,7 +1200,7 @@ def __str__(self) -> str: def __eq__(self, other: Operation | object) -> bool: if not isinstance(other, Operation): - raise ValueError("__eq__ only available to compare to Operation objects.") + return False return ( self.operator == other.operator and self.args == other.args diff --git a/neps_examples/basic_usage/algo_tests.ipynb b/neps_examples/basic_usage/algo_tests.ipynb new file mode 100644 index 000000000..9a913f13b --- /dev/null +++ b/neps_examples/basic_usage/algo_tests.ipynb @@ -0,0 +1,212 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 11, + "id": "938adc12", + "metadata": {}, + "outputs": [], + "source": [ + "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled, Integer, Fidelity\n", + "import neps\n", + "\n", + "# Define the NEPS space for the neural network architecture\n", + "class SimpleSpace(PipelineSpace):\n", + " int_param1 = Fidelity(Integer(1,100))\n", + " int_param2 = Integer(1,100, prior=50, prior_confidence=\"medium\")\n", + " int_param3 = Integer(1,100, prior=50, prior_confidence=\"high\")\n", + " cat = Categorical(['option1', 'option2', 'option3'])#, prior=0, 
prior_confidence='low')\n", + "global_values = []\n", + "def evaluate_pipeline(int_param1, int_param2, *args, **kwargs):\n", + " # Dummy evaluation function\n", + " global_values.append(int_param1)\n", + " return - int_param2/50 +int_param1" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "89427fd0", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Grid search does not support priors, they will be ignored.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Configs: 40\n", + "\n", + " success: 40\n", + "\n", + "# Best Found (config 30):\n", + "\n", + " objective_to_minimize: 0.48\n", + " config: {'SAMPLING__Resolvable.int_param2::integer__1_100_False': 26.0, 'SAMPLING__Resolvable.int_param3::integer__1_100_False': 1.0, 'SAMPLING__Resolvable.cat::categorical__3': 0, 'ENVIRONMENT__int_param1': 1}\n", + " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\neps_test_runs\\algo_tests\\configs\\config_30\n", + "\n" + ] + } + ], + "source": [ + "from neps.optimizers.utils.grid import make_grid\n", + "from pprint import pprint\n", + "from functools import partial\n", + "\n", + "# pprint(make_grid(SimpleSpace(), size_per_numerical_hp=2, ignore_fidelity=False))\n", + "\n", + "neps.run(\n", + " evaluate_pipeline,\n", + " SimpleSpace(),\n", + " root_directory=\"neps_test_runs/algo_tests\",\n", + " overwrite_root_directory=True,\n", + " optimizer=partial(neps.algorithms.neps_grid_search, ignore_fidelity=True, size_per_numerical_dimension=5),\n", + " evaluations_to_spend=40\n", + ")\n", + "neps.status(\"neps_test_runs/algo_tests\",print_summary=True)\n", + "print()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "c6197a65", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAASxdJREFUeJzt3Qm8lGP/x/Ffe0ilXVpUpNKCSqVshYQs9fDwpCeVIrIU8mRpESJZo7IWqidCWf5EKgrtlDZUQtGm1FFo0fxf38tzj5k5M6dzTnPO3DPzeb+Mc2bmPjPX3DPN/bt/1++6rgKBQCBgAAAAPlIw0Q0AAACIRIACAAB8hwAFAAD4DgEKAADwHQIUAADgOwQoAADAdwhQAACA7xCgAAAA3ylsSWj//v32008/2eGHH24FChRIdHMAAEA2aG7YX3/91SpXrmwFCxZMvQBFwUnVqlUT3QwAAJAL69atsypVqqRegKLMifcCS5YsmejmAACAbMjIyHAJBu84nnIBiteto+CEAAUAgOSSnfIMimQBAIDvEKAAAADfIUABAAC+k5Q1KNkdyrRv3z77888/E90UAAlWqFAhK1y4MNMSAEkkJQOUPXv22IYNG+y3335LdFMA+MShhx5qRx55pBUtWjTRTQGQjgGKJnFbu3atO2PSRDD6MuKsCUhfyqbqpGXLli3uu+HYY4894ARRABIv5QIUfREpSNE4a50xAcAhhxxiRYoUse+//959RxQvXjzRTQJwACl7GsEZEoBQfCcAyYV/sQAAwHcIUBCV6namTJmS5TZXXXWVXXzxxeaH9n311VfWvHlzl7o/4YQTEtKmZPDdd9+5fbd48eJENwUA0qsGJSuPTvsm356rz9m1c7S9Dvbbt2/PFBR89NFHduaZZ9ovv/xipUuXDl736IBcs2ZNu+mmm6xnz55xa79GQR1xxBHBg1qNGjXsiy++8M3BP7R9MnDgQDvssMPs66+/thIlSiS0bQCAg5dWAUoq0YFY6xD9/vvv9vbbb1uvXr2sVq1a1qZNm7g8fqVKlczPItu3Zs0aO//886169eq5fkwVTzIE9cDYTwDyA108SapChQruIK3Mxo033uh+fv755zGHWZYvX95ee+214G3KhGhOCM8nn3xixYoVC84dE9qFoseWE0880d1+xhlnhD3+8OHD3WOVLVvWrr/+etu7d2+OuoVuvvnmsMfU73pN/fr1szJlyrjXOWjQoLC/CW2ffl+0aJHdc8897ndv26VLl1rr1q3dCA61TRmmnTt3ZmrLfffd54akH3fcccEukFdffdVOPfVU97dNmza1b775xhYsWGBNmjRxGZp27dq5YauxKOPVqVMnt9/1GBraOmbMmOD9t99+u9WuXduNNFMG7O677w7bb3oNeo9eeOEFq1atmnvO6667zk08OGzYMLdP9BlQ2yP3y6hRo1z79Lx67ND3PZply5a57fUcFStWtM6dO9vPP/8c9n707t3bvU/lypWztm3bZvl4ABAPBChJTsHH1KlT7YcffrBmzZpF3UYHrdNOO811D3kHz5UrV7rsi2o35OOPP3YH4mhDs+fPn+9+fvjhh65r5Y033gjeN3PmTJe90M8XX3zRxo4d6y4HS4+lLpt58+a5A7KCj2nTpkXdVm06/vjj7ZZbbnG/33rrrbZr1y53IFU3kAKLSZMmufbrQBtq+vTpLhulx37nnXfCuozuuusuF/RpBtJ//etfLmB6/PHHbfbs2bZ69WobMGBAzPYr4FixYoW99957bl8raNDB3aOlxrWftI0e89lnn7VHH3007DG0X/X3en//+9//2vPPP++yROvXr3fv14MPPujaqH0U+dwdO3a0JUuWuCDp8ssvd22IRt2KCuIUfC5cuNA916ZNm+yyyy7L9H4oa/Lpp5/a6NGjY75uAIgXunh8RAfIyPqJWFP1V6lSxf3cvXu3m/dFB3AFIbHoLPjpp592v8+aNcsdkHQWrqClTp067ufpp58e9W+
VBRBlISK7VhQAPPnkk25iPD2ODqA66Pfo0cMORsOGDV2QIMo+6Dn0uGeffXambdUmBRHad177dMD/448/7KWXXnKBjugx2rdv7w7syhSI7nvuueeCXRbKoIiCHC9ToPqeK664wj1/y5Yt3W3du3fPMhBTwKh9rIyLHH300WH3K7Dw6D4938SJE10Q5NH7qgyKgpl69eq52iMFU++++64bMquMj16LgsPQ4PTSSy+1q6++2v0+ZMgQF3yNGDHCRo4cmamd2idq5/333x+8Tc+peYSUNVKWx3sPFCgmwt5NmzPdVqRihYS0BUD+IUDxER2AdKYdSmfHV155ZaZtdRavA5cCFGU4lBlQd4hqUaJR8KEDrboldPatgMULUHSw/eyzz8IOjtmlzIWCE4+6etS1crAUoITS427enPlAFYsyBo0aNQoGJ6LgQgd9HeS9AKVBgwZR6ylCnz9029DbsmqP3gdlMZSBOeecc1xX0imnnBK8/5VXXrEnnnjCZUnU7aR1o1RTFEqBi97j0OfUvg6dzyNaO1q0aJHpeqxRO8qyKMCJVlistnkBSuPGjWO+VgDICwQoPqKD6THHHBN2m9L50aguRKN6vCBBgYzqEWIFKDq4KoBRcKKLtlWAojNwdYGo/iH0AJpdmp0zsjtJQUAsOriqWypUtJqVnD5uboUGMLGe31sqIfK2rNqjmg7NWqpshzIYKl5WfY7qdebMmeO6XgYPHuyyNKVKlXLZk4cffjhmG7znjPd+UXDkZZUihdYoxdpPAJBXqEFJETqzVk1JLDqQqejzzTfftOXLl1urVq1clkAZGHX9qCsi1kHIyzDEY2VodRepTiRUXszJUbduXZcdUC2KR/UTXtdIftBr7dKli40bN84ee+wxe+aZZ9ztylZptNGdd97p9ru6TxTMxMvcuXMzXdf+iOakk05ynwdlaxQch14ISgAkEgFKklJaf+PGje7ApgLQl19+2S666KIs/0bdOiq21OgQpfR1sFbdyvjx42PWn4hGi2hEiFdAuWPHjly3WwWZKsZUbciqVatcnYlGkcSbMhSaI0YBgh5f3Rg33HCDG6HiddnkJRXQKhhUMa0CANUXeUGCAhLVqChrom4UdfVMnjw5bs+tz4PqSFRDov3rdQFGo6zOtm3bXI2NMmlqz/vvv29du3aNS0AKALlFgJKklAVQCl5nuhqyes0117hCyKwoCNFBJ3JIb+RtkVSAqoOoMi0ajnugQCgr6tLQKBPVu2jU0K+//mr//ve/Ld40GkkHWh189Tz/+Mc/XDeLikLzg7JO/fv3d1kqBYHKcCkgkQsvvND69OnjggYFi8qoaJ/Ei7qO9Fx6bgWCCkpVZBuN3k9llvQZUK2MugI1nFjdh6xdAyCRCgQiCwKSQEZGhuu315l8ZGGhRm5oSXXVaLBiKdKNuvKUjUnUEgR+HsXDdwPg7+N3JE6RAACA7xCgAAAA32GYMZBCkrDHFgCiIkABkNCaEmaFBRANXTwAAMB3CFAAAIDvEKAAAADfIUABAAC+Q5EsgINC0SuAvEAGBUnpqquuOuBsqR999JGbWXX79u2W6PZp+G/Pnj3ditJqU14skJgqtOyCptsHkN7SK4Myc2j+PdeZ/XN8QHvxxRdt6NCh9p///Cd4+5QpU+ySSy7J0fwW+oL/+OOP3e/FihWzmjVrunVfrrvuOneb1l156KGHbOzYsW6xQS0EqAXsevToYVdffXXMg/2ZZ55pv/zyi1unJZRWwtUBxTuo6Lq3Oq/Wc9HifO3atbPhw4fbEUccYfHw+OOPh+0TvWata6NVg/0gsn1aaFH7W/tR70e5cuUS2j4A8DsyKD6i9UEefPBBFwQcLAUbGzZssBUrVthll13mVq3VonHeYnKPPvqoDRkyxN2vlX51dh/PTMM999zjnl+r9mq15FmzZtmNN94Yt8fXWg6RgZKfRLZPqwRrccdTTjnFKlWq5BZgzCkFPPv27YtzS1OPAvD9+/cnuhkA8jN
AGTVqlFshVQv86NKiRQt77733whbj0oGwbNmyVqJECevYsaNt2rQp7DF0wDr//PPdarMVKlSw2267jS/d/znrrLPcwUtZlKx88sknduqpp7rMR9WqVd2Bf9euXWHbaP/qsXS2PmjQIJcheeutt9x9+qlsyqWXXuoWTmvUqJF1797dbr311ri9lsMPP9w9/1FHHeUyL126dLHPP/885vZ67gsuuCB4XZkQdYUo8+DRys3PPfdcpi4U/a6MkbIW+htdvvvuu+DfLVq0yJo0aeL2iQKEr7/+OkfdQuqOCX1MZUIUfGi15Lp167rP+rnnnusCMk9k+2644Qb32dfjKMMku3fvdu+d/h0oOG3VqpUtWLAgU1v0b6xx48YuG6b3XtkiPZ4yVspIKUP17LPPus9A165d3b7Xvgr9txnNyJEj3edCz63H0IrPHu13tUevU/+e9d4oyPJoX6htr776qp150YVW8ujq1qJtW/tmzRr3GrS/tV+UOduyZUvYful4VRcbMny4Va5Xz8oeU8uuvfZa27NnT8x2aj/dPmiQHX1CIytd42hr2e5ct2883vuhz7VWbdZ+0r4GkEYBSpUqVeyBBx5wX/gLFy601q1b20UXXWTLly9392sJ+bffftsmTZrkDhg//fSTdejQIezMRsGJvoy0xLy6NPTlMmDAgPi/siRUqFAhu//++23EiBG2fv36qNvoIKGDoYK/L7/80l555RV30FIXTlYUzHgHAQUOM2bMCDtw5KUff/zRfS6aNWsWc5vTTz/dvQ59RkSfH3WDeAciPYZeuw7OkRSYKFj2ska6KHDz3Hnnnfbwww+7z6wyF926dTvo1/Tbb7+5LquXX37ZZYd0QIwV4Kl9yijp34/a5gUh/fr1s9dff939O1DwpqCibdu2tm3btrC/V5ef/t2tXLnSnSCI/kb7Z/78+S5Y6dWrlws4FYDpsc455xzr3Lmza2c02hcKjtQuBWwKSE477bTg/Qp2+vbt67abPn2666pTV2NkZmLgwIHW/+Y+Nu+DaVa4cCH7d69e7nXpNc+ePdtWr16d6d/3zNmz7atV39i0N96wl0eNtjdee80G9Ovnim2jrVx80x39be6ihTZu9NO2aOZH1rH9he7fwKpVq8LeD2UfFcDq++iIwF/Fu7EeE0ASCBykI444IvDcc88Ftm/fHihSpEhg0qRJwftWrlypTvjAnDlz3PV33303ULBgwcDGjRuD24waNSpQsmTJwO7du7P9nDt27HCPq5+Rfv/998CKFSvcz0xm3J9/lxzq0qVL4KKLLnK/N2/ePNCtWzf3++TJk91r9XTv3j3Qs2fPsL+dPXu226/eaz799NMDN910k/t93759gZdfftk9xpNPPuluW758eaBu3brubxo0aBC45ppr3HuTlZkzZ7rHOOywwzJdChQoEHj00UeD21avXj1QtGhRd1/x4sXd3zVr1izwyy+/xHx83af2LFiwILB///5AmTJlAkOHDnV/J+PGjQscddRRUfdX5GuObPOHH34YvO3//u//3G1RPx8hfxPa1i+++MLdtnbtWnd9zJgx7vrq1auD2zz11FOBihUrxmyf9o/2i2fnzp3u38v48eODt+3ZsydQuXLlwLBhw8LaMmXKlLA26rW2atUqeF3vsfZ1586dg7dt2LAh7N9epNdff939u8vIyAhkx5YtW9zjLV261F3XvtB1/dvfs3GTu7w8erS7bfr06cG/03t43HHHhe2XMkccEdj+7drg3z354LBAicMOC/zx0wZ3PfS9/P777wOFChUKfLd4SXB7Xdq0aRPo379/2PuxePHiv/dlyLa6HPC7AUC+yOr4HSnXNSg60504caI709LZq7Iqe/fudd0Unjp16li1atVszpw57rp+NmjQwKWTPTpjzMjICGZhYqV4tU3oJZXpTFBnyDpjjrRkyRKXdVL63LtoH+rMdu3atWHpe92nzIkyC8pu6SxblAZftmyZzZ0712UTNm/ebO3bt49
ZIBtKZ8Xq8gi9VK5cOdN26rrTfcry6AxclD3zMiSRlKJXV5MyJkuXLrWiRYu6upgvvvjCdu7c6TIqyrLkhpd1ENWBiF7zwVB3Ua1atcIeNyePqWyQ/r20bNkyeFuRIkXs5JNPzvS+q7skq9ekzJu6YfRvy+P9G4vVprPPPtuqV6/uugCVaVGdUGi2RdmJK664wt2v7lyvWyqy6yS0HRXLlXc/I9sR2YaG9eq5/edp1qSJ7dy1y9b9+GOmduqzoM/M8ae0sCNq1ghe9HkI7XLS5yW0LQCSX44r9fSFoYBE9SY6AE6ePNkd8HQw0pdEZOGivqA2btzoftfP0ODEu9+7LxbVZKiwM10o1a6go3///q7PPpQO1tdcc03UglMFg55OnTq5rg0FKDp4KkUfStebNm3qLqplGDdunDtQ6W9UlxKL7ot8j6MVfKr7QV0WojoH1ZToc6OC3NAgNpS6bxSgqIZAwYiG5KrGQ10/OiDdcsstlhs68HtUNyGxiii9/RQ6AkeBRFaP6T1uXq0kfNhhh2Xr+XPyOlWnoq4g7e8PPvjAdcOoVkndT3p/FbAqgFFtiwJQPU79+vUz1YpEe87I2w6mYFWfdwVgcz+Y5n4Gn7fcX3VuHn3OvecHkKYBynHHHeeCkR07dthrr73mih+9Ia15RQdq9Yd7lEEJrTFIRao50LBZ7e9QJ510kht54x38sxpFcqBtQinIlMhi23jxDi6///57zG0UlLzwwgsu4FGNgRe0aPTRN998E7X+xKPgOFZ2JifKl/8rC6BaEW9IdF7MWaLsi9r86aefukDAC4QUIOTXHCDazwoWdVEtiQIT1SbpfVBdioITFWOLgsR4+XLFCvc5UFAh8xctshKHHWZVjzoq07Ynnniie1+3/PyztWrePHg7k8EBqS/HAYq+VL0Dn0YW6AtVBXH//Oc/3dmVRj+EnmFrFI+KMkU/VdQXyhvl420Tjc6odUknSpMrC/LEE0+E3X777bdb8+bNXVGsumR0dq2AZdq0afbkk09m67E1WkNdC96QV3UNKQisXbu265aLh19//dVlxZRVWLdunSuc1MFfz5lV5kh/984777gATRSUqL3KAql9sagLYt68eW50ic6slX3JDX22Ffwqm3Dfffe5wEgFtvGm901dbuoKU1uV/Ro2bJjrZtGIqrymffztt9+6fa5A7N1333WZDgXEuq4uo2eeecbtd3XrhM7Nc7D0PdGzbx9XXPv9unV2z0PDrFe3bpmyfKL3/IqOHa3bDb3twUGD7YT69e3nrVvt48VfuC4ddRsCSE0HPQ+KvtRUI6JgRaldr95AdBamLzel9kU/1UUU2ietA6v6uL0zePxNIywi0+P6UlbGSgdOnd3qDFPp+Wh1ILGo+0ijapTG1wFAWTAFJkr152Z+jmjUJh3c1C4NUdUBWY+vA18sOjAqMFMg4wVKOoBqHxyo/kQjaJSl0edIf5/bYab6DCtj89VXX7l9rXqge++91/KCgjCNxlLXmjJjGvGiocvxmswuKzqJeOONN9xIPHWjjR492r3u448/3gUKqi9TXZm6dVS/pIn94uXMU0+1Y2rUtDaXXGydrulpF7RtawNuvS3m9s899rh1uvQyu33QQKvfqqX9o+tV7sQotEsTQOopoErZ7G6ss2zNa6AvBp3pTpgwwX2B60tVRXc6I9SZmIo4FXRo+KNoSLEoVatuCx20dLaoM2x9OSsToOG12aUuHnVhqJtJzxNKtTHKCKhWQvM7AIif7AzZzar7RTVV2zZtstfHvpjtv4/2nAfq4om2PhDfDUDiZXX8jpSj02VlPv7973+7/nk9gc4wveBENDupzr50Vqisis7UNZrEozNcpZYVyCiborNqnb0rUwAAAJCrAOX555/P8n6dlTz11FPuEosKApVlAQAAiCW9FgsEkFDq/mVmVwDZwWKBAADAdwhQAACA7xCgAAAA3yFAAQAAvkORLIC4oggWQDyQQQEAAL5DgIKotDLslCl
TstxGs4JefPHF+dYmrbejVZHThVYa1vug9a0AIN2kVRfPyMV/z2qb16474bocba+DvQ5EkUGBDlJnnnmm/fLLL279FO966OR4NWvWtJtuusl69uwZt/aHruarBfg0PfgXX3zhlipIFK2/otmHAQCpL60ClFSihRi1joGWrdfCf1o+oFatWtamTZu4PH5Wq0vnN61+q1W0tQhgPB4nnaTjawaQGujiSVIVKlRwQYQyGzfeeKP7+fnnn0fdVutB6uD+2muvBW9TJkSrDXs++eQTK1asmP3222+Zunj02KKVk3X7GWecEfb4w4cPd4+llYqvv/5627t3b8x2Dxo0yD33008/bVWrVrVDDz3ULrvsMrdwVGTX0X333ecWljzuuOOidvFoxeKLLrrISpQo4YI1Pc6mTZsyPddzzz2X5QJx33//vVvZWRkjZWi0oq+3HIMWuOzevbv7+0MOOcS15fHHHw/7e6+9WvCyYsWKLtOl9aX27dtnt912m5UpU8aqVKliY8aMCf6NslLal1o1+JRTTnFt08rBWqk6K3qftIq12qL9p/d+165dwfu1j4YMGeLWzNI+iWdWDQDyEwFKklPwMXXqVHewbtasWdRtdCA87bTTXPeQqLto5cqVLvvy1Vdfudt0YGzatKkLGCLNnz/f/fzwww9d188bb7wRvG/mzJm2Zs0a9/PFF190U5nrkpXVq1fbq6++6jI/aru6jq67LrxLbPr06S5LNG3aNLfAZKT9+/e74GTbtm2u7dru22+/tX/+85+Znuv11193bV68eHHU9iio0uKWs2bNsqVLl7oVuhX0eM+j4GLSpEm2YsUKGzBggN1xxx2u/aFmzJhhP/30k3uMRx55xAYOHGgXXHCBC3rmzZtn1157rV1zzTW2fv36sL9TAHPLLbe4faAFNBUobd26NWo7tZ/PPfdctxjnl19+aa+88ooLWHr37p0pYGzUqJF7zLvvvjvL9wIA/IouHh/Rgdg7MHp0Bh+NDpqiA6sOojpjVxASi7IeylqIDqLKhigDo6ClTp067ufpp58e9W+9rhVlSCK7fnQAfvLJJ91K1Xqc888/3wUXPXr0iNkWLXv/0ksv2VFHHeWujxgxwv3dww8/HHx8ZTKU+YjVPaHnUDCxdu1al0kQPaayH6pVUbDldXHo9qy6hxTc6aDfoEEDd101PZ4iRYrY4MGDg9eVSZkzZ44LUJSx8ShL8sQTT7jVvJVlGTZsmMtGKZiR/v372wMPPOACissvvzz4dwou9NwyatQoF7BpUc5+/fplaufQoUOtU6dOdvPNN7vrxx57rHtOvW/6Wy9D1Lp1axf05GZIcJGKFbL1dwCQ18ig+IiKX3WWH3rRQTqa2bNnh22j7gUdpGLRQUwZgC1btriMgwIWXRSYqEvms88+y9R1kx0KCBSceNTVs3lz1vNgVKtWLRiciDIHCrKUMfEoWMiqdkIZIAUmXnAi9erVc90rui909ewD1a6om+Tee++1li1busyHshOhtDp348aN3eMogHzmmWdcUBO5HxSceNTV4wU8on2kAC9y3+i1ewoXLmxNmjQJa3+oJUuWuOyU2uBd2rZt6/adAjWPHgMAkh0Bio8oa3DMMceEXUIP5KF0Jq/7dWDs2rWrde7c2dVsxKKDpc7yFZyEBij6XRkHBSmqhcgpZRgiu5N0wDxY8Rqtk53Hufrqq133kPahsjI6wCurI6oRufXWW10dygcffOACQu1vZWYOtB/ivW927tzpuolCA1gFLatWrXIF0jl5zQDgdwQoKUJn6KopiUUHRxVXvvnmm7Z8+XJr1aqVNWzY0HURqetHB+VYBzYvkxGruymnlH1QvYZn7ty5wa6R7Kpbt66tW7fOXTzKEGmotjIpOaVMjOpEVKui7pFnn33W3f7pp5+6wE01MuoWU1CoWpB40Wv3qKh20aJF7rVFc9JJJ7nXGBnE6sJInQN3Ze39eav9mfGrbRs3LtHNAZANBChJSl0FGzdudCN
QVMD58ssvu6LRrChj8t///teNbFH3gIIC1a2MHz8+Zv2JN2JIo0ZUH6FRMqEjbnJDtRJdunRxZ//qqlIXi+o5cjK0+ayzznJZIdVkaPSSCnk1ckWvI6ddHKrpeP/99103iR5LBb9ekKA6j4ULF7r7v/nmG1d0qoxTvKj7aPLkya5YWcW6KmDu1q1b1G1vv/121xWnuhVlT5Q5UcAZWSQLAKmAACVJKdugeg+dPevApdS/1y0Riw7eyoKE1pro98jbIqk2QsWYyrRo2O+BAqEDUZs7dOhg5513np1zzjkukzNyZM4m0VNGSAdnFekqyFLAouJWjWzJKb1+BQcKSjRKpnbt2sH2aL+qrRodpFFSGmETOeLoYKhwVheNulEB7VtvvWXlypWLuq32k7rkFCgpG6aMjkYV6T0BgFRTIKBxqkkmIyPDSpUq5c7kNddD5AgRnQlnNe8FEkdzk2h+lVhDftOFX2bnzekonvxYCDCyDdGeMzft/GPvXvv+xx+tzNIvrfLVV8ehpQDiefyORAYFAAD4DgEKAADwHQIU5HsXT7p373hT0qt3NZHdOwDgZwQoAADAdwhQAACA77AWD4CkwxpCQOojQAHgK/kxlBmA/9HFAwAAfIcABQAA+A4BCpLSVVddZRdffHGW23z00UduSnwtIJhfQ6jTbdjwsU2a2BPPPJ3oZgBIQWlVg7JlxJP59lzlb+id4wPuiy++aEOHDrX//Oc/wds1Lfwll1zi5szILq2rozVbpFixYm6NGi0o560ho7VnHnroIRs7dqxbbFALAWpRvB49etjVMaYA18H+zDPPdIvZlS5dOtOcHlpwTxfvuh5XtCBhxYoVrV27djZ8+HC3dk48PP7442H7RK9ZwcFjjz1miXLrrbfaDTfckLDnB4BUQgbFR7R20IMPPuiCgIOlYGPDhg22YsUKt1KwFsPTSsYyePBge/TRR23IkCHufq3e27Nnz7hmGu655x73/D/88INbLXnWrFlu1eJ40VoOkYFSoihQ2rdvn1shumzZsgf1WHv37rV0smfPnkQ3AYBPEaD4iFbkrVSpksuiZEWr3mo1W2U+qlat6g78u3btCtvm0EMPdY+l7Im6HpQh0Uq5op/Kplx66aVuwTqtpNu9e3eXAYiXww8/3D3/UUcd5TIvXbp0sc8//zzm9nruCy64IHhdmRB1z0ydOjVsFeTnnnsuUxePflfGSFkV/Y0uWozPs2jRImvSpInbJ6eccop9/fXXMduhv9PfT5w40W2roLF+/frBjFRo19F7771njRs3dlkqvSeRXTz79+93gVqVKlXcNrov9PV4z6UVmLXStJ5LwVy0AEiPXa1aNfc4Wr04NNh7+eWX3evz9vm//vUv27x5c6b2vv/++24FZH1uWrdu7baZOn26NTi1lZU9ppZ17nWt/fbbb2FZKWXedFFAqFWWBz74QJbZvO07dtg1fftY5Xr13GOe07GDLVm+PHj/PQ89ZE3atLYXxo+z2k2b2OHVq8V8LADpjQDFRwoVKmT333+/jRgxwtavXx91mzVr1ti5555rHTt2tC+//NId3HRw1EEkKzooeWerOojNmDHDtmzZYvnhxx9/tLffftuaNWsWcxsdoPU61P0kCgh0QNTB1XsMvXYdNCMpMGnRokUwa6SLAjfPnXfeaQ8//LAtXLjQChcubN26dTtgm2+77Ta75ZZb3GrDeuz27dvb1q1bw7ZRV9wDDzxgK1eutIYNG0Ztl55XXVt6r9q2bWsXXnihrVq1KtPj3HTTTe5xtE2k119/3WW8nn76afe36vZr0KBBWNZF2bAlS5a4+xT4KGiLpCDnySeftM8++8zWrVvnMmsjnn3GXho5yt4cN94+/Phj99kLpW5H7bP58+e71/P46NEuuIjlih5X2+aff7a3J0ywuR9MsxMbNLRzL/2HbQvJCq5Zu9Ymv/N/9soLY2zB9OkxHwtAeiNA8RnVm+hMe+DAgVHvV3alU6dOrt5DWRGd5T/
xxBP20ksv2R9//JFpex3wx40b5w6QOmuWRx55xAUnClR0YL322mtdNiA7lA1QV0boRd04kW6//XZ3nwIj/Y3O4PW8sSgj9Ouvv7qAQGfo6hJSgOAFKPqpbIyyKJF0dl+0aNFg1kgXBXue++67zwVA9erVc8GADtDR9lUoBXwKAuvWrWujRo1yz/H888+HbaPsyNlnn221atWyMmXKZHoMBSbaD5dffrkdd9xxrvsuWp2M3ssOHTq4bNaRRx6Z6XG0f/WalGFTFuXkk092wZhHAZdqfJQta968ufs86P3cuXNn2OPce++91rJlS5dFUcZMQeCIBx+0Exs0sFbNm1uHCy5w3X2hFOgpOFL79bm7rnt3e/zpZ6Lus0/nzbMFX3xhE599zhqfcIIdW7OmPThokJUuWdLeeOft4HZ79u61F0aMcM/bsN7xWb4PANIXAYoP6UCmM1edUUfSWbKKW0MDBJ11qzth7dq1we1GjhwZDBB0MOvTp4/16tXL3acD9bJly2zu3Lnu4KZUvzIEsQpkQ82ePdst9hd6UZdDtAyE7lNgNP1/Z8nnn39+MEMSSfUk6mpSILJ06VIXcKguRgGLDrQ6mCrIyI3Q7IYXAIR2gUSjrIlHGQR1oUS+H7otloyMDPvpp59cQBBK13PyOKKuuN9//90FIHovJ0+e7GpeQruw9P4peFE3j7efIgPH0P2gwmUFdDWrHx28rUK58pn2y8mNGtm+zVvc5Gm6NG/SxFav/Tbq+/jl8uW2c9cuq1S3jh1Rs0bwsvaHH2zNd38VTUv1KlWsfLlyWb5mAEirUTzJ4rTTTnNBR//+/TOl6nWwvuaaa6IWnOoA5dHZrro2FKDooKzRNKF0vWnTpu6iM3hlWTp37uz+Rmfysei+yOJUHcAjqXvGy3Yo06OsgQ76OkNXJiAadd8oQFGdhQ6yykoog6GuHwUoyqjkRpEiRYK/K5MjCugO1mGHHXbQj5Gdx1EWQ3UzH374oU2bNs3VD2kUlvaJuu30WdFF9Svly5d3gYmuRxagRu6H0OvebQezXxScHFmxok17Y3Km+5RF8SgwAoADIUDxKdU2qDtAqfVQJ510kht5E62rI5S6JA60TShlVSSy2DZevC4XZQJiUVDywgsvuIBHdTZe0KLRR998803U+hOPMi6xsjO5oeySAkVRtkJZigPV+YQqWbKkyyx9+umnYZkfXVcXTU4p0FSWRBeNyKpTp47LNKk7TLUx+rx4dTeqtYmX+Z9/EXZ93qJFdkyNmmFdaJ4TGza0jZs3W+FChezokGAZAHKDAMWnVASpLIjqCUKppkF1BjpYqktGZ98KWHRmrQLI7PjHP/7huhpUv6LaBnUNKVtTu3Ztd+CLB9WTbNy40R1AVZDZr18/d3av54xFAYH+7p133nEHXFFQovYqC6T2xaK5V+bNm+cKRNW1Fa0mJCeeeuopl/lRBkc1GBr6nZ3i2shuLtUSqUZFweaYMWNct1e0kTpZUZeegi8VGSv7oGyXApbq1au7jIeCMxW3qpZIXXcqmI2XdT+ut9sGDrCrO//bvlj6pY18/nkbNmhw1G3bnHaa6wL6R9erbOjdA1wNyoZNm+zdD6fZxe3Oc3UpAJBd1KD4mIowI1PuqiNQal8ZBRWWquBxwIABUetAYlH6X6NqdDaug76GACsw+eCDD6J21+SG2qSgQu3S8GEFUnr8rOYJ0SRuCswUyHiBkoIW7YMD1Z9omLLO6pUJ8ro5DoYCJF1UF6MuJg3NVrdVTqgbrm/fvq5rSq9LQ4z1OAp8ckJdas8++6wLKvX+q6tH75/2pV6rAphJkya51642qzg3Xq689DL7/fc/rGW7c+2m/v2ttybz69w56rbqInpr/AQ7tXlz63HzTXZ8y1PsymuvsR/Wr7cK5cvHrU0A0kOBQE6mKPUJFSCqC2PHjh0ulR5KozOUEVCthOaVAHJCGRh9dlScmw7T1keuHFykYoXg78peNaxd2x4ecq/5XWi7Y62
I/Mfevfb9jz9amaVfWuWIgvDIWaZzOhM0gIM/fkeiiwcAsrEsBkELkL/o4gEAAL5DBgWIKLZNwl7PPKEh39G6SgAgPxCgAEg7+bmyOYB86OLRNOua2EuzVVaoUMEt1ha58JoK67wF27yLhj+G0ggLzSqqIZN6HA3HDJ0ZEwAApLccZVA0vFWTRClIUUBxxx132DnnnOPm4QidDVPTcWuIbLSZIzWfg4ITzb+hNVG0sNu///1vN6ulFsqLF9L0AEIFTN8JAfcfgBQLUEKXihfNv6AMiGbZ9GbdFG/Rtmg0F4YCGs3loPVANJRTE0tpAjKttqpJpw6GN323lo3XZFYAIH/s2as1DqxQxBIAAFKwBkXjmCVy1k7NlKnZLhWkaDKwu+++O5hFmTNnjpu0SsFJ6MRhWshu+fLlbuKxSLt373aX0HHUsWiyLk1s5S16puf11l8BEG7v3r1h1/+MWOU58n6/yqrdypwoONmydasV37DRCsZhHSYAPg5QNLunFpnT7Jb169cP3v6vf/3LTcGtGUS1kq0yI6pTeeONN9z9mv48NDgR77rui1X7Mnhw9Om1o/GyNwdasRZId39m/Bp2vdCvGVne71dZtzvgMicKTkr++GO+tw1APgcoqkXRuh+aBjxUz549g78rU6Lpztu0aWNr1qxxa5LkhtaJ0ZThoRkUb2G0aJQx0fOq+ylZzgCBRNg2blzY9TJXXpnl/X6VZbsD5rp1yJwAaRCgaKE6Leg2a9Ysq1KlSpbbaoEzWb16tQtQlN2YP39+2DabNm1yP2PVrRQrVsxdckrdPdFWXQXwlyK/h3eNRC4PEXm/XyVruwHEaZixRsYoOJk8ebLNmDHDrVlyIFq9VZTRkBYtWrhl4kO7X7QSr+bk12JnAAAAhXParTNhwgR788033VwoXs2IFv7RiBl14+j+8847z620qhqUPn36uBE+WoVVNCxZgUjnzp1t2LBh7jHuuusu99i5yZIAAIA0z6CMGjXKjdzRZGzKiHiXV155xd2vIcIaPqwgpE6dOm6Z+Y4dO7ql4T3qclH3kH4qm3LllVe6eVBC500BAADprXA8Jz9T4aomczsQjfJ59913c/LUAAAgjbCaMQAA8B0CFAAA4DsEKAAAwHcIUAAAQGqtxQMA6WrLiCfDrpe/oXfC2gKkIjIoAADAdwhQAACA7xCgAAAA3yFAAQAAvkOAAgAAfIdRPEAKY6QJgGRFBgUAAPgOGRQAyEU2CkDeIoMCAAB8hwAFAAD4DgEKAADwHWpQgBSqiWCUDoBUQQYFAAD4DgEKAADwHbp4gBTCUFgAqYIMCgAA8B0CFAAA4DsEKAAAwHeoQQGAOGBhRiC+yKAAAADfIUABAAC+Q4ACAAB8hxoUII0wTwqAZEEGBQAA+A4BCgAA8B0CFAAA4DsEKAAAwHcIUAAAgO8QoAAAAN8hQAEAAL5DgAIAAHyHidoAIA+weCBwcMigAAAA3yFAAQAAvkOAAgAAfIcABQAA+A4BCgAA8B1G8QBJOioEAFIZGRQAAJDcAcrQoUOtadOmdvjhh1uFChXs4osvtq+//jpsmz/++MOuv/56K1u2rJUoUcI6duxomzZtCtvmhx9+sPPPP98OPfRQ9zi33Xab7du3Lz6vCAAApFeA8vHHH7vgY+7cuTZt2jTbu3evnXPOObZr167gNn369LG3337bJk2a5Lb/6aefrEOHDsH7//zzTxec7Nmzxz777DN78cUXbezYsTZgwID4vjIAAJAeNShTp04Nu67AQhmQRYsW2WmnnWY7duyw559/3iZMmGCtW7d224wZM8bq1q3rgprmzZvbBx98YCtWrLAPP/zQKlasaCeccIINGTLEbr/9dhs0aJAVLVo0vq8QAACkVw2KAhIpU6aM+6lARVmVs846K7hNnTp1rFq1ajZnzhx3XT8bNGjgghNP27ZtLSMjw5YvXx71eXbv3u3uD70AAIDUlesAZf/+/XbzzTdby5YtrX79+u62jRs
3ugxI6dKlw7ZVMKL7vG1CgxPvfu++WLUvpUqVCl6qVq2a22YDAIBUDlBUi7Js2TKbOHGi5bX+/fu7bI13WbduXZ4/JwAASLJ5UHr37m3vvPOOzZo1y6pUqRK8vVKlSq74dfv27WFZFI3i0X3eNvPnzw97PG+Uj7dNpGLFirkLAABIDznKoAQCARecTJ482WbMmGE1atQIu79x48ZWpEgRmz59evA2DUPWsOIWLVq46/q5dOlS27x5c3AbjQgqWbKk1atX7+BfEQAASK8Mirp1NELnzTffdHOheDUjqgs55JBD3M/u3btb3759XeGsgo4bbrjBBSUawSMalqxApHPnzjZs2DD3GHfddZd7bLIkAAAgxwHKqFGj3M8zzjgj7HYNJb7qqqvc748++qgVLFjQTdCm0TcaoTNy5MjgtoUKFXLdQ7169XKBy2GHHWZdunSxe+65h3cEAADkPEBRF8+BFC9e3J566il3iaV69er27rvv5uSpAQBAGmEtHgAA4DsEKAAAIDWGGQNAXlqwcUGm25pWapqQtgBIDDIoAADAdwhQAACA7xCgAAAA3yFAAQAAvkORLADEAYW9QHwRoABAPtgy4smw6+Vv6J2wtgDJgC4eAADgOwQoAADAdwhQAACA71CDAiApi1ApQAVSGxkUAADgO2RQAMQNQ20BxAsZFAAA4DsEKAAAwHcIUAAAgO8QoAAAAN8hQAEAAL7DKB4giTE3CIBURQYFAAD4DgEKAADwHQIUAADgOwQoAADAdwhQAACA7xCgAAAA32GYMQDEaWFEAPFDBgUAAPgOAQoAAPAdungA5ClmuwWQG2RQAACA75BBAXxqy4gnE90EAEgYMigAAMB3CFAAAIDv0MUDAD7t0it/Q++EtAXwAwIUADgAJmUD8h9dPAAAwHfIoABAknYD0QWEVEaAAiDh3SVM3gYgEgEKkOIICAAkIwIUAMEg5rvFI4O3XXfCdQlsEYB0R4AC5BPqB2JL51EyI/8XFB79v31Adgv4CwEKgJgHTu+g6ccDZ2hQ42V+cpP1oQsMSJFhxrNmzbL27dtb5cqVrUCBAjZlypSw+6+66ip3e+jl3HPPDdtm27Zt1qlTJytZsqSVLl3aunfvbjt37jz4VwMAANIzg7Jr1y5r1KiRdevWzTp06BB1GwUkY8aMCV4vVqxY2P0KTjZs2GDTpk2zvXv3WteuXa1nz542YcKE3LwGIC2kczdIOolXZghIuwClXbt27pIVBSSVKlWKet/KlStt6tSptmDBAmvSpIm7bcSIEXbeeefZ8OHDXWYGSEeRNSrJEJD4vY1+b9+BHD1pnvu5Zfb+RDcFSI0alI8++sgqVKhgRxxxhLVu3druvfdeK1u2rLtvzpw5rlvHC07krLPOsoIFC9q8efPskksuyfR4u3fvdhdPRkZGXjQbQAof7AGkeYCi7h11/dSoUcPWrFljd9xxh8u4KDApVKiQbdy40QUvYY0oXNjKlCnj7otm6NChNnjw4Hg3FQDyNaij+BZIYIBy+eWXB39v0KCBNWzY0GrVquWyKm3atMnVY/bv39/69u0blkGpWrVqXNoLpNJKtGQ5AKSKPB9mXLNmTStXrpytXr3aBSiqTdm8eXPYNvv27XMje2LVraimJbLQFkjFoAYAkE8Byvr1623r1q125JFHuustWrSw7du326JFi6xx48buthkzZtj+/futWbNmed0cAPDVCB0AcQpQNF+JsiGetWvX2uLFi10NiS6qFenYsaPLhqgGpV+/fnbMMcdY27Zt3fZ169Z1dSo9evSw0aNHu2HGvXv3dl1DjOABAAC5mqht4cKFduKJJ7qLqDZEvw8YMMAVwX755Zd24YUXWu3atd0EbMqSzJ49O6yLZvz48VanTh3X5aPhxa1atbJnnnmGdwQAAOQug3LGGWdYIBCIef/7779/wMdQpoVJ2QAAQNwyKAAAAHmNAAUAAPgOqxkDaYj5UgD4HRkUAADgO2RQAMDnmDIf6Yg
MCgAA8B0yKEAOjIyY/fO6E65LWFsAIJURoABABIqIgcQjQAF8gANifDNcR7M/gaRHDQoAAPAdMigAkAKoj0KqIYMCAAB8hwAFAAD4DgEKAADwHWpQgARg1A4AZI0ABQCQ3GYODb9+Zv9EtQRxRIACAElqy4gng79r7pfvLm2W0PYA8UQNCgAA8B0yKEhLkXNGCPNGAMynAv8ggwIAAHyHDAoQ57VfmlZqmsAWAUBqIIMCAAB8hwwKkIbW//Jb2PUqRxyasLYAQDQEKEAuHD1pXtiBfn3IfU0rHbgbCACQNbp4AACA7xCgAAAA36GLJw89Ou2bsOt9zq6dsLYAAJBMCFCA/2GCKiB/pc1JXORaQcJ6QQdEgAIAKSgZZktu/sMzEbcMT1BL4EcEKOmEFT8P3trZf/3cvuF/N5RPZGuQCwyxTsFsRJykTUYnSRCgJAMCCyBfEcRkje5Q5AcClGQMCOjPzBedXx+S6bYWjHtLeiV3e9kvTy1LNguYUyePMjEdLVEiszeS7hkcAhSkd1dNqBqnxvXg8V2UGgAgFWpZUqVLB/5GgJIi4tV3mox9sMnYZiAvuqXmrNkavN6iVllLNlGzCByl0hZvfYqgGt7nZ4FAmosWfDRPSEuQLOhRBwAAvkMGBSmHbAkAJD8CFCCGKhmLMt227n8/y/yx769fiuVvm1JxCK8wjDef+H2EIhJnpv9GhxKgRDHn278LzaTFmck3ZC1VPvz5WgAbbWRPnkzuZmalq+fNcyHntn8f5cbyefPYefy+nzr9y+DvVT4/1L67tFmePh9yd0zJ9nElzQcOEKBkw5znb810W4vuw9P3jCYZ2wz4lDfypvD/Mktkk5L/+y8yQKAYOHcIUPwmjcf8R6vyj5fQ4Zd5PeGa14XhPWeiJ3eL1qWCJBenrM/IyVfk2XxA6SzzqErkBgEKkkpeBjEAAP8gQMnDPkarZslfW5MEZyZzq/XM8eOs2/57ptuqlj7E8gtrvSA/HD1pXtj1OTUjsixl8rc98JGZ/s/W5zj5PGvWLGvfvr1VrlzZChQoYFOmTAm7PxAI2IABA+zII4+0Qw45xM466yxbtWpV2Dbbtm2zTp06WcmSJa106dLWvXt327lz58G/GiQ1BR+RF2Qj1R96Qf5J1X2vou7QS263AfI7g7Jr1y5r1KiRdevWzTp06JDp/mHDhtkTTzxhL774otWoUcPuvvtua9u2ra1YscKKFy/utlFwsmHDBps2bZrt3bvXunbtaj179rQJEyYc7OtBnDH19F8affJzopuQtPKqBib0cYP1Pkk4vXuuMm4Fcvc4bxVcHXa9quVf1hD+lt8jjbIjx4eadu3auUs0yp489thjdtddd9lFF13kbnvppZesYsWKLtNy+eWX28qVK23q1Km2YMECa9KkidtmxIgRdt5559nw4cNdZgZIJV53UnDuFGH+lJyLyFKU3P33/qySsfd/v51jvprfJZeBhJ9EFpj7ofA7bcxM7xGTcf2YrV271jZu3Oi6dTylSpWyZs2a2Zw5c9x1/VS3jheciLYvWLCgzZsX3l8KAADSU1yT9QpORBmTULru3aefFSpUCG9E4cJWpkyZ4DaRdu/e7S6ejIyMeDY7fcVpcrnOrw/JdNtJJf9p6d4tlEyzq0YOjY7WXZLoNuankrtDJtVzaiWoJam05MTfJ66I9v0bv4k9m6fI4rFJcdgYOnSoDR482JJeElRNJ6PIf4wjM0olrC048ME+o9iRlmyy032TOagxs+L+/oqN12i2aLVqxSIGDG1e3y/KXx6T4+cauf3v2XI915VuaHmBGrzEiuuurlSpkvu5adMmN4rHo+snnHBCcJvNmzeH/d2+ffvcyB7v7yP179/f+vbtG5ZBqVq1ajybDoQ5+sMN4TUjKV6Q6h1c/67lSGw9R65HxUQuK5DOSwpE3YelE9CQ1C4mnbsvcxDDzLE+DFA0akdBxvTp04MBiYIJ1Zb06tXLXW/RooVt377dFi1aZI0
bN3a3zZgxw/bv3+9qVaIpVqyYuyDFkFHKmVQayuqjfZaRhIFodkR7XZGj0Za0Kmd+EnVqgZqpOTIrL7M8zS1NAxTNV7J69eqwwtjFixe7GpJq1arZzTffbPfee68de+yxwWHGGplz8cUXu+3r1q1r5557rvXo0cNGjx7thhn37t3bjfBJ9RE8UYdx8Y8PsboHIuoeoh1wSvq8CyG3rzUtAqa1e/73S73Y20QZseQk4fuOJJhc1Gdy/ClfuHChnXnm34Ojva6XLl262NixY61fv35urhTNa6JMSatWrdywYm8OFBk/frwLStq0aeNG73Ts2NHNnYLUmf0205lQLgOxzzNeCX/cXLanSsaiHPXHJ2P3DvJXdjIvWW0T/LyVtKQXLfPxRfmaCWkLUkeOA5QzzjjDzXcSi2aXveeee9wlFmVbmJQtxvTzKZpRSdXsUbRsQLyKQCPrSVLgOIYkFDW4jyikjZwATqpazbz57igTn66QZCx2fTTN1iJLwrfIH7KznHZeTdWe6NRcpucvzaiZAxecIplF75aCXyXDgTzT96jP1m7zAwIUn0l0piEyqMrNQny5Fe0szOyvQmoAiRt67HeZhh6XiN+cIkgcApRUxQgZ5EM3VHaGOdM1lb0ulJK7f06aAthsBTH5uIhgZHsqlIjP47JgaWL5/18CcMAsSySyLvklU51MHnaFZKcoNXLdo2Qc5ZTrfcFMDEmNIdaZseQTAADwndQ8vUBy1tsQLvuLUvTejKxOxNzl8BUKeRMjP7uBmuf2uZK0y58AJUUleqQPssbBBH7l99lmkT4IUIAUCmIIfOAXiRwNlK1MQ5rXdyQDApRcSpfq7nR5nfDXaCAgmWfSRnwQoABIPBZCRArP04LcIUBBysnOujv0s6eHVF2pGHkjVTPGc5K0JpEABQB8hKAK+AsBShJI1ugXAA52IsYL9x+TkLYg8QhQkEddKnyppEJfvzcjKwDkNwIU5AvOjAAAOcHcnQAAwHfIoABpiAndkDyLf6avOWlef0iAAiDXCHSQ1whi0hddPAAAwHfIoABJjDkzAKQqApQ0xsga+DXQKlmcryYg3fEtgKSfth4AkHoIUJAnKGzDwaDrCgABChKGIAYAEAsBCgCkqchVvdPphIh6O/8jQEGW0qWQNp2+qAEgGTAPCgAA8B0CFAAA4DsEKAAAwHcIUAAAgO9QJAsAyHYB+ZJW5SxdpMsgAb8igwIAAHyHDArSYvI0hhEDSNfvv2RFgJKiSE0CAJIZXTwAAMB3CFAAAIDv0MUDAEg71Jv4HxkUAADgOwQoAADAdwhQAACA71CDgpTDnCcAkPzIoAAAAN8hg4Ico/odAJDXCFAAALmWzosJIsm6eAYNGmQFChQIu9SpUyd4/x9//GHXX3+9lS1b1kqUKGEdO3a0TZs2xbsZAADkSQY59IIky6Acf/zx9uGHH/79JIX/fpo+ffrY//3f/9mkSZOsVKlS1rt3b+vQoYN9+umnedEUAECayc/AgXXPkixAUUBSqVKlTLfv2LHDnn/+eZswYYK1bt3a3TZmzBirW7euzZ0715o3b54XzQEAAEkmT0bxrFq1yipXrmw1a9a0Tp062Q8//OBuX7Roke3du9fOOuus4Lbq/qlWrZrNmTMn5uPt3r3bMjIywi4AACB1xT2D0qxZMxs7dqwdd9xxtmHDBhs8eLCdeuqptmzZMtu4caMVLVrUSpcuHfY3FStWdPfFMnToUPc4AACkIrqK8iFAadeuXfD3hg0buoClevXq9uqrr9ohhxySq8fs37+/9e3bN3hdGZSqVavGpb0AACANhxkrW1K7dm1bvXq1nX322bZnzx7bvn17WBZFo3ii1ax4ihUr5i6IjWpyAEAqyfOZZHfu3Glr1qyxI4880ho3bmxFihSx6dOnB+//+uuvXY1KixYt8ropAAAgXTMot956q7Vv39516/z00082cOBAK1SokF1xxRVuWHH37t1dd02ZMmWsZMmSdsMNN7j
ghBE82c+OpHu/JIDUWisr1Sd3o77EJwHK+vXrXTCydetWK1++vLVq1coNIdbv8uijj1rBggXdBG0andO2bVsbOXJkvJuBFMZigEDy4N8rfBOgTJw4Mcv7ixcvbk899ZS7AACQjqgbPDBWMwYAAL7DYoEAgIRiwUFEQ4CShGk+UoMAgFRHgIKE4+wJSB0UxSJeCFAAAPCht9J8igmKZAEAgO+QQQEAII6oE4wPMigAAMB3yKCkUf8hUT0AIFmQQQEAAL5DgAIAAHyHLh4AAJLUWym8UjIZFAAA4DtkUHKJglMAAPIOGRQAAOA7BCgAAMB36OJJgvlL0g2LBwIAyKAAAADfIYOSjyis/QvLsQMADoQABXFF9wyAg8X3SHTpdpJLgAIAQAp5K0XqKKlBAQAAvkOAAgAAfIcABQAA+A41KNmQboVJAAAkGgEKAMDXGNWT9yfZfiykJUABAKTUXEoEMKmBAAV5iknZAAC5QZEsAADwHQIUAADgOwQoAADAd6hBAQAgzb0VZaRPC0ssAhTkCMP9AAD5gQAFB4VROgCAvECAAgBIKWR6UwMBCgAg7RHU+A+jeAAAgO+QQUlh8TgjoMYEQCriu83/CFDSWLR/oKQ1AQB+QBcPAADwHTIoCEPaEwDgBwQoAAAc5Mka3ePxRxcPAADwHTIoAAAcJAYdpFiA8tRTT9lDDz1kGzdutEaNGtmIESPs5JNPtmTkh0l+qB8BAP+gmyhJA5RXXnnF+vbta6NHj7ZmzZrZY489Zm3btrWvv/7aKlSoYKkuN8EEH14ASF1+ONH1k4TVoDzyyCPWo0cP69q1q9WrV88FKoceeqi98MILiWoSAABI5wzKnj17bNGiRda/f//gbQULFrSzzjrL5syZk2n73bt3u4tnx44d7mdGRkaetG/373sPuE39OVvDrv+Ww8f4bc++HLfr2Bkbs37MA/x9ZJty04ZEoN35JxnbnKztTsY2J2u7o30fJ0O7j434zl/Womy+Pn9eHGO9xwwEAgfeOJAAP/74o1oW+Oyzz8Juv+222wInn3xypu0HDhzotufChQsXLly4WNJf1q1bd8BYISlG8SjTonoVz/79+23btm1WtmxZK1CgwEFFclWrVrV169ZZyZIl49RaRMO+zj/s6/zDvs4/7OvU2N/KnPz6669WuXLlA26bkAClXLlyVqhQIdu0aVPY7bpeqVKlTNsXK1bMXUKVLl06bu3RzucDnz/Y1/mHfZ1/2Nf5h32d/Pu7VKlS/i2SLVq0qDVu3NimT58elhXR9RYtWiSiSQAAwEcS1sWjLpsuXbpYkyZN3NwnGma8a9cuN6oHAACkt4QFKP/85z9ty5YtNmDAADdR2wknnGBTp061ihUr5lsb1G00cODATN1HiD/2df5hX+cf9nX+YV+n3/4uoErZhD07AABAFCwWCAAAfIcABQAA+A4BCgAA8B0CFAAA4DtpHaA89dRTdvTRR1vx4sXdisrz589PdJOS2tChQ61p06Z2+OGHuxWpL774Yrc6dag//vjDrr/+ejcLcIkSJaxjx46ZJuxDzj3wwANuVuWbb745eBv7Or5+/PFHu/LKK93+POSQQ6xBgwa2cOHC4P0ab6BRiUceeaS7X2uLrVq1KqFtTkZ//vmn3X333VajRg23H2vVqmVDhgwJW7uFfZ07s2bNsvbt27tZXPV9MWXKlLD7s7NfNYt7p06d3ORtmjC1e/futnPnTssTgTQ1ceLEQNGiRQMvvPBCYPny5YEePXoESpcuHdi0aVOim5a02rZtGxgzZkxg2bJlgcWLFwfOO++8QLVq1QI7d+4MbnPttdcGqlatGpg+fXpg4cKFgebNmwdOOeWUhLY72c2fPz9w9NFHBxo2bBi46aabgrezr+Nn27ZtgerVqweuuuqqwLx58wLffvtt4P333w+sXr06uM0DDzwQKFWqVGDKlCmBJUuWBC688MJAjRo1Ar///ntC255s7rvvvkDZsmUD77zzTmDt2rW
BSZMmBUqUKBF4/PHHg9uwr3Pn3XffDdx5552BN954w62HM3ny5LD7s7Nfzz333ECjRo0Cc+fODcyePTtwzDHHBK644opAXkjbAEWLEl5//fXB63/++WegcuXKgaFDhya0Xalk8+bN7h/Bxx9/7K5v3749UKRIEfeF41m5cqXbZs6cOQlsafL69ddfA8cee2xg2rRpgdNPPz0YoLCv4+v2228PtGrVKub9+/fvD1SqVCnw0EMPBW/Te1CsWLHAf//733xqZWo4//zzA926dQu7rUOHDoFOnTq539nX8REZoGRnv65YscL93YIFC4LbvPfee4ECBQq4RYDjLS27ePbs2WOLFi1y6StPwYIF3fU5c+YktG2pZMeOHe5nmTJl3E/t871794bt9zp16li1atXY77mkLpzzzz8/bJ8K+zq+3nrrLTfr9aWXXuq6L0888UR79tlng/evXbvWTTgZur+13oi6jtnfOXPKKae4ZU+++eYbd33JkiX2ySefWLt27dx19nXeyM5+1U916+jfgkfb6/g5b968uLcpKVYzjreff/7Z9XNGzlqr61999VXC2pVKtLaS6iFatmxp9evXd7fpw691mCIXetR+133ImYkTJ9rnn39uCxYsyHQf+zq+vv32Wxs1apRbouOOO+5w+/zGG290+1hLdnj7NNp3Cvs7Z/7zn/+4lXQVUGtRWX1X33fffa7uQdjXeSM7+1U/FaCHKly4sDsJzYt9n5YBCvLnzH7ZsmXuzAfxpyXQb7rpJps2bZor8kbeB9w6a7z//vvddWVQ9PkePXq0C1AQP6+++qqNHz/eJkyYYMcff7wtXrzYneyosJN9nV7SsounXLlyLjKPHNGg65UqVUpYu1JF79697Z133rGZM2dalSpVgrdr36p7bfv27WHbs99zTl04mzdvtpNOOsmdwejy8ccf2xNPPOF+11kP+zp+NKqhXr16YbfVrVvXfvjhB/e7t0/5Tjl4t912m8uiXH755W6kVOfOna1Pnz5ulKCwr/NGdvarfup7J9S+ffvcyJ682PdpGaAoLdu4cWPXzxl6hqTrLVq0SGjbkpnqrhScTJ482WbMmOGGCYbSPi9SpEjYftcwZH3Js99zpk2bNrZ06VJ3dulddIavNLj3O/s6ftRVGTlkXjUS1atXd7/rs64v6ND9rW4K9cuzv3Pmt99+czUNoXRCqe9oYV/njezsV/3USY9OkDz6rtd7o1qVuAuk8TBjVSePHTvWVSb37NnTDTPeuHFjopuWtHr16uWGqH300UeBDRs2BC+//fZb2NBXDT2eMWOGG/raokULd8HBCx3FI+zr+A7lLly4sBsCu2rVqsD48eMDhx56aGDcuHFhQzT1HfLmm28Gvvzyy8BFF13E0Ndc6NKlS+Coo44KDjPWkNhy5coF+vXrF9yGfZ37UX9ffPGFu+jw/8gjj7jfv//++2zvVw0zPvHEE91w+08++cSNImSYcR4YMWKE+wLXfCgadqxx3cg9feCjXTQ3ikcf9Ouuuy5wxBFHuC/4Sy65xAUxiH+Awr6Or7fffjtQv359d2JTp06dwDPPPBN2v4Zp3n333YGKFSu6bdq0aRP4+uuvE9beZJWRkeE+x/puLl68eKBmzZpu7o7du3cHt2Ff587MmTOjfkcrKMzuft26dasLSDQ3TcmSJQNdu3Z1gU9eKKD/xT8vAwAAkHtpWYMCAAD8jQAFAAD4DgEKAADwHQIUAADgOwQoAADAdwhQAACA7xCgAAAA3yFAAQAAvkOAAgAAfIcABQAA+A4BCgAA8B0CFAAAYH7z/9hDmrbN05E4AAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from neps import algorithms\n", + "import matplotlib.pyplot as plt\n", + "values=[]\n", + "for _ in range(10000):\n", + " values.append(algorithms.hyperband(SimpleSpace(),sampler=\"uniform\")({},None).config[\"int_param3\"])\n", + " values.append(algorithms.neps_hyperband(SimpleSpace(),sampler=\"uniform\")({},None).config['SAMPLING__Resolvable.int_param3::integer__1_100_False'])\n", + " values.append(algorithms.hyperband(SimpleSpace(),sampler=\"prior\")({},None).config[\"int_param3\"])\n", + " values.append(algorithms.neps_hyperband(SimpleSpace(),sampler=\"prior\")({},None).config['SAMPLING__Resolvable.int_param3::integer__1_100_False'])\n", + "\n", + "plt.hist([v for n,v in enumerate(values) if n % 4 == 0], alpha=0.5, label='HB with uniform sampler',bins=100)\n", + "plt.hist([v for n,v in enumerate(values) if n % 4 == 1], alpha=0.5, label='NePS HB with uniform sampler',bins=100)\n", + "plt.hist([v for n,v in enumerate(values) if n % 4 == 2], alpha=0.5, label='HB with prior sampler',bins=100)\n", + "plt.hist([v for n,v in enumerate(values) if n % 4 == 3], alpha=0.5, label='NePS HB with prior sampler',bins=100)\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "4d423fb2", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAAQalJREFUeJzt3QucjHX///EPLUtCDiEhKkUl5RCLuxOlklTuSrdKEhUqKspdSHJIhcghkkPppCK5SwmJrHOUyKHcSKETG3LK/B/v7+9/zT0zO3uetdfuvp6Px9idmWtnvnPNmO/7+h6ub4FAIBAwAAAAHymY0wUAAACIREABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+E2e50LFjx+ynn36y4sWLW4ECBXK6OAAAIB10btg///zTKlasaAULFsx7AUXhpHLlyjldDAAAkAnbt2+3SpUq5b2AopYT7wWWKFEip4sDAADSISkpyTUwePV4ngsoXreOwgkBBQCA3CU9wzMyPEj2iy++sJYtW7r+Iz3BjBkzUtz2vvvuc9sMHz487Pbff//d2rZt68LFySefbB06dLB9+/ZltCgAACCPynBA2b9/v9WuXdtGjRqV6nbTp0+3JUuWuCATSeHk22+/tTlz5tisWbNc6OnUqVNGiwIAAPKoDHfxXHPNNe6Smh07dtgDDzxgn3zyibVo0SLsvvXr19vs2bNt+fLlVq9ePXfbyJEj7dprr7Xnn38+aqABAAD5S1x2TAG+4447rEePHnbeeecluz8xMdF163jhRJo1a+amGy1dutRuvPHGZH9z6NAhdwkdZAPAX1MHjx49an///XdOFwVADjrhhBMsLi4uJqcAiXlAefbZZ13hHnzwwaj379y508qVKxdeiLg4K126tLsvmkGDBlm/fv1iXVQAMXD48GH7+eef7cCBAzldFAA+cOKJJ9qpp55qhQsX9k9AWblypb344ou2atWqmJ5ArVevXvbwww8nm6YEIGepxXTLli3uqEnds/pC4uSJQP5tST18+LD98ssv7nuhevXqaZ6M7bgFlIULF9ru3butSpUqwdvU5PvII4+4mTz//e9/rUKFCm6bUGoa1swe3RdNfHy8uwDwF30ZKaTogEFHTQDyt6JFi1qhQoVs69at7vuhSJEi/ggoGnui8SShmjdv7m5v3769u56QkGB79uxxrS1169Z1t82bN899yTVo0CCWxQFwnGTlKAlA3lIwRt8HGQ4oOl/J5s2bg9fVjLN69Wo3hkQtJ2XKlAnbXklKLSPnnHOOu16zZk27+uqrrWPHjjZ27Fg7cuSIde3a1dq0acMMHgAA4GQ45qxYscIuuugidxGNDdHvffr0SfdjTJ061WrUqGFNmzZ104ubNGli48aNy2hRAADHwWWXXWbdunVLdZtJkya5GZp+KJ8GbLdu3dqdDFRjotRqj+iqVq2a7GSqfhGXmQ+CBsKkl8adRFJryxtvvJHRpwaQiwybs/G4PVf3K8/O8N/cddddNnnyZDdL8PHHHw/errNj63QHGfmeizVV9qpwo1WsqnB1IswbbrgheN3jDVb+5z//6V5XrMbuvf/++641PLRSU/nSCi3HS2T59L5qTOTixYutbNmyVrJkyRwtHzKHjmMA+ZYG8OnUCH/88YflZhMnTnRTvdXlPnr0aHvttdfsmWeeidnj66AyPYu75ZTI8n3//fduOMH555/vhhhkZmaZJnhobCRSp4Gw2YWAAiDf0qB+VWBqbUjNokWL7B//+IeboaAZSzrPk5b9CG1R6N+/v912221WrFgxO+2008KWA1FrzFNPPeXG6alVQ60cKZ0rKjPUtaLXobJdd9111qpVK3e6h5SohUVj/zxqCVEl/t133wUrHb2Ozz77LFkXin7XDI3u3bu7v4ms/HUGcYWDk046yY03VHDKSLeQWrBCH1P77cILL3ShS/tZrSEas/jnn38Gt4ks3wsvvOCWUNHj6Lo
ohN55551WqlQpN+NMZ0TftGlTsrLMnDnTzj33XPc+bdu2zT2nwp7+Vq/p9NNPd9toKq32s2674IIL3PCHlKT1/uu16eSlCll6H//1r3+FzXb9/PPP3WvRvtWQCn0Or7jiCrfNxx9/7Pa3urP0d6HnI9Jr1/usi/abWpN69+6dauugWu3uueceO+WUU9xj6nnWrFmT7P145ZVXrFq1almapZMWAgqAfEtdIgMHDnTLbfz4449Rt9HRuCpajWn4+uuv7e2333aBJbSCl+eee86tU/bVV1+5LqOHHnrIrTcm7733ng0bNsxefvllVymqEq5Vq1a2vKaNGze6mZGpzYq89NJLXaXnWbBggau8vNu0FIkmMDRq1Chqd0qlSpXs6aefduEjNICoctSSJapwFRBUwT/66KNZfk16D7TPtHabLirv4MGDo26r8mkShmaMqmy67nXpKUQoXOiM5qqkNQZSrzO0/GpRU+Wr9eK8k4rqvWvcuLF7b7V8i2amKrDcfvvtLgieeeaZ7npKFX9a77/KoICrIKD7NDTirrvuSvY4CgcvvfSS67ravn273XLLLW78iIZM/Oc//7FPP/3UfZZDqbtLJ0NdtmyZO0/Z0KFD3etLyc033xwMPpptW6dOHTdeVKcC8WiijF6T9q0myWSXmJ9JNk+an/rRlfwyK8qbVPUfaf7dKQ+Ef8kBOL403kRHhH379rUJEyYku1+tK1rg1DtC18mnRowY4Sr5MWPGBI8gVYF5Y1nOPvts+/LLL12ldOWVV7qKWkfGarHRWAkdSV988cWplmvv3r3u6Dw91HKjsKVzSmlZELWi6ASXKdGRtQKUWgFUea1bt84dWSugaBV6/axfv37Uc9uoO0XP5R3th1JFq9mZqrBFIU5BJqvU1aIWDq8bRwFh7ty5NmDAgKjlU7l10kCvfAoFCiZ6T7zQpckaanFSIFCl7JVfXWQKmqEUZO699173uyaE6H3X/vH+7rHHHnOBaNeuXVHP55XW+3/33XcHfz/jjDPc56t+/fpu1mzoZ0AtOfqcSYcOHdx7rPCmv/FaxubPn+/K49Fr1OdQLTCaTfvNN9+46wpxkRS8FWQUULzxSwqc2kfvvvtucFFftbBNmTLFtbJkJ1pQAOR7OmrWkaYWM42ko1pVjqoovIvO7+SdRdejCiqUrnuPp4rsr7/+chWJKgYNclWYSI0qYx2dRl6iUYWj+1RWtTCoFUWVeEo0NkMVuVoiNJhU3QYKNbou+ul1jWSEgoEXTkSnO488MWdmqJsldIxJRh9X74OCWGirkk6JoQo79D1XqFF3TaTQ28qXL+9+hraAeLelVKa03n+1VLRs2dIFF71OhV8v2KRWDu1vL5x4t0WWoWHDhmFdZvpcKrBFWzdLnx+FIu2b0M+7PucKQh51c2V3OBFaUADke5dccokLHToijWxa1xe2jp6jjRkJPWt2anQUu2HDBjemQ90+nTt3dl1CCgKhs08iT3Z11llnpevxdXTubatKV+Mz1KqiI+5oj6EKS69ZLSU6UlYYUeWn1pe1a9e6LoTMdM1EvhY9T2rjHfQaI+8P7XJJ7XGzYwCrxnZEG1Ab+vze/dFuS6lMqb3/ao3QZ08Xteqo4lcwad68ebIBqJHPGev9os+6wl9o958ndKyQxicdDwQUADBzYxrU1eOdVNKjPnh1gaQVFpYsWZLsugYvhlZ+OkrWpUuXLu5cUGpu1+PHmrpgREftKdFR+vjx411AUVeJwoJCiypOBRWvKyEatTTEYuVqVcYKUxpw7FV62TGmQe+DWiyWLl0a7OL57bffXGjQgNjjIaX3XwFNZdHnz1tjLrUBtxml1xz5uVQ3pfcZCaXPohbtVWuTWq1yGl08APD/m+w11kT9/6HUn68WBY2nUOWp5vEPPvgg2SBZjW8YMmSI617RDJ5p06a5cR6iLiKNb1HrxA8//GCvv/66q7DUVB4LmnmhiuWnn35yR+Ua96FxMKEBKZJaTRS8NBhUJ8v
0btNRvGaUpHaUrMpLg2B37Nhhv/76a6bLrS4XdVP8+9//dl0IGuypfRVrqpA140bdKxpnoa4MDXDVbCvdnt1Se//VCqfAp8Gtuk9jZTRgNlbUGqMTqiqMvfnmm+55vM9lJI2RUReQzrGjAbcarKvP/hNPPBHT0JRetKCk4wRTDbf9lmybmQX/d7p/qb0n+X/SpPX/15+bmoXvZf38Cwlnhi8vEE3nCztn+XmAvE4Vu2bphFLXhyp9fUlrqrGOeDXO4tZbbw3bToui6ku8X79+bnqmZkuomd5rHtcRsioKtTwoDH344YfJlgbJLG+tMzXxq7tHLSGanaQj4ZSoDCqXgow3EFMBReVLa/yJ9pO6vbQf1NqS2ZPaaRyMKusePXq41hzNFtFMFW8wZqzPFaOKWWNt1HWiffTRRx+l2MUWS2m9/wowCmkKx2rF0MDU66+/PibPrdlFaknToFy1mmgfpLR/9fnRPtFnXZ8pDaL2Pk/eOJvjqUAgJ0+XmElJSUluTrdGueuLIPsDyri0A8qiKAEl/tQ0n2th0+QDsjKKgIKccvDgQTeALrvPh+B3fjuzKiAKmuq2PN6nsk/teyEj9TddPAAAwHcIKAAAwHcYgwIAWRRtUVQgp30eZbpwbkILCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgD4gE4zPmPGjFS30UrLWifFD+X77rvvrGHDhu5MoTpbKVKegq59lx2LIOZ1nAcFQPaYP+j4PdflvTL8J6rstcheZCjQuSMuv/xy++OPP9waKt51jyrkM844I9U1TTLj559/tlKlSgUrNZ0m/KuvvvJN5R9aPunbt69bUFCL0Hlr+QCxREABgHRQRay1Q7TwmhZ6u//++91ieVrgLha0KJufRZZPqw+3aNEiSysya9E+reSL1B3Op/uJLh4ASIdy5cq5SlotGw8++KD7uWrVqqjbag3WU045xd59993gbWoJOfXU/y0gumjRIouPj7cDBw4k60LRY8tFF13kbo9cXVir3eqxtBpuly5d7MiRIxnqFtKihqGPqd/1mnr27OlWGNbr1KrCoULLp99XrlzpVjXW796233zzjV1xxRVWtGhRVza1MO3bty9ZWQYMGGAVK1a0c845J9gF8s4777jVovW39evXt40bN9ry5cutXr16roXmmmuucavrpkQtXm3btnX7XY9RvXp1t4Kx57HHHnMrN5944omuBax3795h+02vQe/Rq6++alWqVHHP2blzZ7f68JAhQ9w+0WdAZY/cL2PGjHHl0/PqsUPf92jWrl3rttdzaJXgO+64w3799dew96Nr167ufSpbtmxwVez8hoACABmg8DF79mzbtm2bNWjQIOo2qrS0RL13qnFVnuvXr3etLxq7IQsWLHAVsSrMSMuWLXM/P/vsM9e18v777wfvmz9/vmu90M/JkyfbpEmT3CWr9Fjqslm6dKmrkBU+5syZE3Vblem8886zRx55xP3+6KOP2v79+11Fqm4gBYtp06a58quiDTV37lzXGqXHnjVrVliX0ZNPPulCX1xcnP3rX/9ygenFF1+0hQsX2ubNm61Pnz4pll+BY926dfbxxx+7fa3QoMrdU7x4cbeftI0ec/z48TZs2LCwx9B+1d/r/X3zzTdtwoQJrpXoxx9/dO/Xs88+68qofRT53K1bt7Y1a9a4kNSmTRtXhmjUragQp/C5YsUK91y7du2yW265Jdn7UbhwYfvyyy9t7Nixlh/RxQMg31IFGTl+QkfM0VSqVMn9PHTokB07dsxV4AohKdFR8Msvv+x+/+KLL1yFpKNwhZYaNWq4n5deemnUv1UrgKgVIrJrRQHgpZdeshNOOME9jipQVfodO3a0rLjgggtcSBC1Pug59LhXXnllsm1VJoUI7TuvfKrwDx48aFOmTHFBR/QYLVu2dBW7WgpE973yyivBLgtvHSOFHK+lQON7brvtNvf8jRs3drd16NAh1SCmwKh9rBY
XqVq1atj9ChYe3afne+utt1wI8uh9VQuKwsy5557rxh4pTH300UdWsGBB1+Kj16JwGBpOb775Zrvnnnvc7/3793fha+TIkTZ69Ohk5dQ+UTkHDhwYvE3PWblyZddqpFYe7z0YMmSI5WcEFAD5liogHWmH0tHx7bffnmxbHcWr4lJAUQuHWgbUHaKxKNEofKiiVbeEjr4VWLyAosp28eLFYZVjeqnlQuHEo64eda1klQJKKD3u7t270/33ajGoXbt2MJyIwoUqfVXyXkCpVatW1PEUoc8fum3obamVR++DWjHUAnPVVVe5rqRGjRoF73/77bdtxIgRrpVE3U5Hjx51Y4pCKbjoPQ59Tu1rhZPUypGQkJDsekqzdtTKooATbWCxyuYFlLp161p+R0ABkG+pMj3rrLPCblNzfjQaF6JZPV5IUJDReISUAooqVwUYhRNdtK0Cio7A1QWi8Q+hFWh6FSpUKFl3kkJASlS5qlsqVLQxKxl93MwKDTApPb+eO9ptqZVHYzq2bt3qWjvUgqHByxqfo/E6iYmJruulX79+rpWmZMmSrvXkhRdeSLEM3nPGer8oHHmtSpFCxygVS2E/5SeMQQGATNCRtcaUpEQVmQZ9fvDBB/btt99akyZNXCuBWmDU9aOuiJQqIa+FIaXupoxQd5HGiYTKjnNy1KxZ07UOaCyKR+MnvK6R40GvtV27dvb666/b8OHDbdy4ce52tVZpttETTzzh9ru6TxRmYmXJkiXJrmt/RFOnTh33eVBrjcJx6IVQEo6AAgDpoGb9nTt3uopNA0Bfe+01a9WqVap/o24dDbbU7BA16auy1riVqVOnpjj+RDRbRDNCvAGUe/fuzXS5NSBTgzE1NmTTpk1unIlmkcSaWih0jhgFBD2+ujEeeOABN0PF67LJThpAqzCowbQKABpf5IUEBRKNUVGribpR1NUzffr0mD23Pg8aR6IxJNq/XhdgNGrV+f33390YG7WkqTyffPKJtW/fPiaBNC8hoABAOqgVQE3wOtLVlNV7773XDYRMjUKIKp3IKb2Rt0XSAFRVompp0XTctIJQatSloVkmGu+iWUN//vmn3XnnnRZrmo2kilaVr57nn//8p+tm0aDQ40GtTr169XKtVAqBauFSIJHrr7/eunfv7kKDwqJaVLRPYkVdR3ouPbeCoEKpBtlGo/dTLUv6DGisjLoCNZ1Y3YehY11gViAQ2TmZCyQlJbk+RB1VRA5yioVhczaGXW+47f+aCUPNLLg57HrtRb8mL2f8//oTU7KwafjAtMxIOLNMmtt0vrBzlp8HiKRZG1u2bHHjM3T0DOQ36spTa0xOLUGQ274XMlJ/E9cAAIDvEFAAAIDvMM0YAIBMyoWjJHINWlAAAIDvEFAAAIDvEFAAAIDvEFAAAIDvEFAAAIDvEFAAAIDvEFAAAKnSafl1OvbUTJo0Kbja8/Fw11135buztxYoUMBmzJhh+UWGz4PyxRdf2HPPPWcrV650K2SGnuJXS3g/+eSTbrnrH374wZ3OtlmzZjZ48GC3/oBHazVoEakPP/zQrT3QunVre/HFF91iWgDyhtGrRx+358rMUg6q4CZPnmyDBg2yxx9/PHi7KoAbb7wxR89vocpegWDPnj1pnlpd1z1af0bftVoHR68rPj4+JuV5//33rVChQsHrWolX5UsrtGQn1RmcgyRvy3ALipbSrl27to0aNSrZfQcOHLBVq1a5RZj0Ux/qDRs2uIWaIle91GqTc+bMcStOKvR06tQpa68EADJI64Q8++yz9scff1huNnHiRHfAqPVPRo8e7VZafuaZZ2L2+KVLl7bixYubH2iRvWPHjrkD4Ky02CjcHD161PKTw4cPW54OKNdcc4374OsII5I+MAodt9xyi1v5s2HDhm4lS7W2aKlrWb9+vVtC/JVXXrEGDRpYkyZN3IqgWgnyp59+is2rAoB0UAtvhQoVXGtDahYtWmT/+Mc/rGjRola5cmV78MEH3cFaaIt
C//797bbbbrNixYrZaaedFnYQp8rwqaeesipVqrhWDbVy6DFiRRW1XofKdt1117nVj3WQmBK1sGhlX49aQtQS89133wUrMr2Ozz77LFkXj37funWrWx1YfxPagiNa0bhmzZquRfzqq692wSkln3/+ufv7//znP24lYAVG1Rtr165N1nU0c+ZMt0Kw9p/qk8gunkOHDrl9Wq5cOfc4qluWL1+e7Lk+/vhjq1u3rnscva+R9Nq1b7RytR7n9NNPD/t8DB061K1ArP2j/d25c2fbt29fsvLq4Fv1oFZ51v7WAbxa7PRZKVWqlCurwlZ6P0PRbN++3dW3ej6FSL3v//3vf4P3e/towIAB7jOn8uQm2T4GRSsW6kPhJd3ExET3e7169cK+JNTVs3Tp0qiPoQ+eVkAMvQBAVqlLZODAge4g6ccff4y6zffff+8qWnVFf/311/b222+7ii20ghd1fat1+auvvnJdRg899JA7YJP33nvPhg0bZi+//LJt2rTJdSOpkssOGzdutHnz5rkDwJRceumlrsL2LFiwwMqWLRu8TRW7uuwbNWqU7G/VMl6pUiV7+umnXfgIDSCqhJ9//nnXgqOWcQWJRx99NM0y9+jRw1544QX3vKeccoq1bNnSPX/o46qlSwe2an1XCInUs2dPt58VAhTOzjrrLGvevLkbUhBK742GHehgWaEo0ogRI1wYeuedd1wPwNSpU1148Kiu0jYqh55L+1rPHUrl1TY68NYBufarDuo1/EEX7R99Ft599910f4Yiaf/o9alla+HChfbll18GQ2FoS8ncuXPd6/B6LHKTuOxecvmxxx5zidBbVnnnzp3JPlxxcXEu/em+aJRe+/Xrl51FBZBPqeK48MILrW/fvjZhwoSo3z/qlvZaEKpXr+4qH1XyY8aMCS4n37hx4+BYlrPPPttVGAolV155pauo1cKhgzGN5VBLysUXX5zmwV16x+XpO1ZhS10WOqBTK0qvXr1S3F6tIKr8fvnlF/f9u27dOtc1r4r0vvvucz/r16/vjv4j6btaz6WKUa8pstIcO3asnXnmme66QpyCTFq077WfRJW+ApDG2ah1wHtcdV2p8o5GrVl6L9R6oVZ+GT9+vKuU9Z4qAHlUHu+5otF7pfdYLTA6uFYLSqjQcTcKLupR0D5T+UL3g8rj7Qe1oCiU7Nq1y72nagm6/PLLbf78+XbrrbcG/y61z1AkBWV1dSm0ea1Y6upTA4Dev6uuusrdptYYbVO4cGHLbbKtBUVvkD5catrUG5UV+o+m/6zeRc1aABArOjpXxaij6khr1qxxFZ8qFu+iI1dVDhrz4UlISAj7O133Hu/mm2+2v/76y8444wzr2LGjq3zTGv+gALB69epkl2hUiek+lVVHyWpFueOOO1J87PPPP98FDbWc6Oj7oosucqFG10U/FWIySoHGq5RF3SS7d+9O8+9C953Kpa6I0PdClWu01o7QVi7VOargPQqCCoGR72lo63006hbRvlQZ1A3z6aefht2vbq+mTZu6Lhi9R9rPv/32m2s1SWk/lC9f3oWZ0MCp2yL3TWqfoUh6rzdv3uzK4H0ute/UMKD94VFLXW4MJ9nWguKFE/VTqvnLaz0RJe7IN0X/UdUMF5nGPeorjNVodACIdMkll7jQoYMhVVChNL7g3nvvjTpmRC0h6aGxCmpmV+Wmo3qNW1BzvoJA6OyYUOpKUDdFeui709tWFeuff/7pWlV0dB/tMXTErdesI219tyqMKACo9UXjPxYvXpyurplIka9FzxOLmTYa+xM51iWz1KKQmjp16rjgqbEqer9Ul6nlS90xGt+hIHf//fe7cR0KBOru69Chg+tW8Vqcou2HaLcp5GbWvn373FgadUFFUjdZel9vvgooXjhRP6uar8qUKZMsEWrqnAbOaueKQozeqNT6TAEgO2lcgrp6IgcSqsJSF0haYWHJkiXJrmuwaGglq7EVunTp0sVq1Khh33zzjXv8WFMXjKjVJiX
qolI3iAKKKlsFIoUWBScFldDWiEg6Ig8d4JlV2lde2NOMKrUAhe67tKi1QmVSl4jXJaO6SGNaMjMVWgfV6nrRRd0zGtehg2jVW6qrNF5G+0s0ViVW0voMhdLnRt08GjIR2giQl8RlJrWpWcmjpKnmMCVJNefpzdQAJTUz6gPsjSvR/foAaWfrzVYzp/oq9SFSP2WbNm3CzpUCAMeTmsI11kTjS0JpHJ1mluh76p577nFHpAosagnRLEWPKschQ4a4WRO6b9q0aW52iqiLSN+HOgjTUfbrr7/uAkvk+IbM0kGfvmtVeergUOMsNIYhtUperSaaiaPvZY238G5Ty4nGn6R25K3uCg2C1fe2Ao4G2GaFyquDWXV7PPHEE+7xMnISNpVVrRoaa6K6RmFH74W6XdS6kRGapaO6TN1eCiF6H9VCpbEdCqmqszSoWkFT77nqsVhJ7TMUSZ9VhUnN3NH+07gd9VpoELMG7ep6bpfhMSgrVqxwb5wu8vDDD7vf+/TpYzt27HCjnzUaXkciepO9i5oMPWqS0tGD+vGuvfZa959j3LhxsX1lAJBB+qKPbHZX14e6YnRUr6nG3vdd5AHVI488Evx+VNeKKjp1G4kqN7VWqFVCj6euA52oMrKFObPat2/vvmdVKalr57zzznNdFBoAm1ogU7n0Xe2NjVBAUZBKa/yJ9pO6O9RyEdqdkJXWKw3aVau6gpb2TUbHTegxNNNKY0LUuqADaU151pTejNCYDoUEjVVRUNPr1MwbhRUN0tX7qjFLGsejuiytKeoZkdpnKJKCrkKiwthNN93kwqjCmMag5JUWlQKBXHgqPk0z1jlXNGA2O96IYXM2hl1vuC15eJpZ8H+tSFJ70a/Jyxl/aprPtbBpygO/0ivhzDLZcqZNIC36MlQrarVq1YKzWfIjP5xZNTfSGBjNZlG3zvE8Tb4f5aXP0MFUvhcyUn+zFg8AAPAdAgoAAMhfJ2oDgPwg9PTiSD+NdcmFowyyBZ+h5GhBAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAf0Oq2M2bMSHUbrbSckTVqYnF20+HDh1t+OrOt3getbYScx3lQAGSLX0b+byG97HbKA10z/Deq7FURRYaCyNOve9c9OnX3GWec4daO6dSpk8XKzz//HFw3RufE0GnCv/rqK7dWTk7RasCpLRoIZCcCCgCkw4YNG9zaIX/99ZdbzE6r52qxPC16GgtaMdcvDh8+7Bbry+pCgN7j5Cf58TVnF7p4ACAdypUr50KEWjYefPBB93PVqlVRt9XZUVW5v/vuu8HbvBXePYsWLbL4+Hg7cOBAsi4ePbZoVVvdHrm68PPPP+8eS6shd+nSxY4cOZJiuZ966in33C+//LJVrlzZrYJ7yy23uMXaIruOBgwY4FZpPuecc6J28Wzbts1atWrlVj9WWNPj7Nq1K9lzvfLKK6kuILl161Zr2bKlazFSC41WX9aKwaLVlLUqr/6+aNGiriwvvvhi2N975R04cKCVL1/etXRpheWjR49ajx49rHTp0m5l54kTJwb/Rq1S2pdvvfWWNWrUyJVNKxJrperU6H3SKtYqi/af3vv9+/cH79c+6t+/v915551un8SyVS2/I6AAQAYofMyePdtV1g0aNIi6jSrCSy65xHUPibqL1q9f71pfvvvuO3ebKsb69eu7wBBp2bJl7udnn33mun7ef//94H3z58+377//3v2cPHmyTZo0yV1Ss3nzZnvnnXdcy4/Krq6jzp3DVzifO3euayWaM2eOzZo1K9ljHDt2zIWT33//3ZVd2/3www926623Jnuu9957z5V59erVUcujUHXo0CH74osv7JtvvrFnn33WhR7veRQupk2bZuvWrbM+ffrYv//9b1f+UPPmzbOffvrJPcbQoUOtb9++dt1117nQs3TpUrvvvvvs3nvvtR9//DHs7xRgHnnkEbcPEhISXFD67bffopZT+/nqq6+21q1
b29dff21vv/22Cyxdu3ZNFhhr167tHrN3796pvhdIP7p4AORbqoi9itGjI/hoVGmKKlZVojpiVwhJiVo91GohqkTVGqIWGIWWGjVquJ+XXnpp1L/1ulbUQhLZ9aMK+KWXXrITTjjBPU6LFi1cuOjYsWOKZTl48KBNmTLFTjvtNHd95MiR7u9eeOGF4OOrJUMtHyl1T+g5FCa2bNniWhJEj6nWD41VUdjyujh0e2rdQwp3qvRr1arlrmtMj6dQoULWr1+/4HW1pCQmJrqAohYbj1pJRowYYQULFnStLEOGDHGtUQoz0qtXLxs8eLALFG3atAn+ncKFnlvGjBnjAtuECROsZ8+eyco5aNAga9u2rXXr1s1dr169untOvW/6W6+F6IorrnChB7FFCwqAfEuDX3WUH3pRJR3NwoULw7ZR94IqqZSoElMLwC+//OJaHBRYdFEwUZfM4sWLk3XdpIcCgcKJR109u3fvTvVvqlSpEgwnopYDhSy1mHgUFlIbO6EWIAUTL5zIueee67pXdJ/n9NNPT3PsirpJnnnmGWvcuLFr+VDrRKhRo0ZZ3bp13eMoQI4bN86Fmsj9oHDiUVePF3hE+0gBL3Lf6LV74uLirF69emHlD7VmzRrXOqUyeJfmzZu7faeg5tFjIPYIKADyLbUanHXWWWGX0Io8lI7kdb8qxvbt29sdd9zhxmykRJWljvIVTkIDin5Xi4NCisZCZJRaGCK7k1RhZlWsZuuk53Huuece1z2kfahWGVXwatURjRF59NFH3TiUTz/91AVC7W+1zKS1H2K9b/bt2+e6iUIDrELLpk2b3ADpjLxmZBwBBQAyQUfoGlOSElWOGlz5wQcf2LfffmtNmjSxCy64wHURqetHlXJKFZvXkpFSd1NGqfVB4zU8S5YsCXaNpFfNmjVt+/bt7uJRC5GmaqslJaPUEqNxIhqrou6R8ePHu9u//PJLF9w0RkbdYgqFGgsSK3rtHg2qXblypXtt0dSpU8e9xsgQqwszdbIfAQUA0kFdBTt37nQzUDSA87XXXnODRlOjFpM333zTzWxR94BCgcatTJ06NcXxJ96MIc0a0fgIzZIJnXGTGRor0a5dO3f0r64qdbFoPEdGpjY3a9bMtQppTIZmL2kgr2au6HVktItDYzo++eQT102ix9KAXy8kaJzHihUr3P0bN250g07V4hQr6j6aPn26G6yswboawHz33XdH3faxxx5zXXEat6LWE7WcKHBGDpJF9iCgAEA6qLVB4z109KyKS03/XrdESlR5qxUkdKyJfo+8LZLGRmgwplpaNO03rSCUFpX5pptusmuvvdauuuoq15IzevToDD2GWoRUOWuQrkKWAosGt2pmS0bp9SscKJRolszZZ58dLI/2q8qq2UGaJaUZNpEzjrJCA2d10awbDaCdOXOmlS1bNuq22k/qklNQUmuYWnQ0q0jvCbJfgYDmzOUySUlJVrJkSXdUoXnnsTZszsaw6w23jUu2zcyCm8Ou1170a/Jyxv/vnAcpWdj0AsuqhDPLpLlN5wtj9x8cCJ0doqPg1M55gZylc5Po/CopTfnNL/xydt78/r2QlIH6mxYUAADgOwQUAADgOwQUAMjjXTz5vXvHOyW9RjTQvZN7EFAAAIDvEFAAAIDvEFAAZFkunAwIwOffBwQUAJnmnVpci7QBQOj3QeTSAxnFasYAsnS6dy0W5y3IduKJJ7oTegHIny0nBw4ccN8H+l4IXdQyMwgoALLEO116WivqAsgfTj755Awto5ASAgqALFGLiU4Br/VjtEIvgPyrUKFCWW458RBQAMSEvpRi9cUEAAySBQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAuT+gfPHFF9ayZUurWLGiO4PkjBkzkp2Lv0+fPu7MkkWLFrVmzZrZpk2bwrb5/fffrW3btlaiRAl3StwOHTrYvn37sv5qAABA/gwo+/fvt9q1a9uoUaO
i3j9kyBAbMWKEjR071pYuXWrFihWz5s2b28GDB4PbKJx8++23NmfOHJs1a5YLPZ06dcraKwEAAHlGhk91f80117hLNGo9GT58uD355JPWqlUrd9uUKVOsfPnyrqWlTZs2tn79eps9e7YtX77c6tWr57YZOXKkXXvttfb888+7lhkAAJC/xXQMypYtW2znzp2uW8dTsmRJa9CggSUmJrrr+qluHS+ciLYvWLCga3GJ5tChQ5aUlBR2AQAAeVdMA4rCiajFJJSue/fpp1Y9DRUXF2elS5cObhNp0KBBLuh4l8qVK8ey2AAAwGdyxSyeXr162d69e4OX7du353SRAABAbgkoFSpUcD937doVdruue/fp5+7du8PuP3r0qJvZ420TKT4+3s34Cb0AAIC8K6YBpVq1ai5kzJ07N3ibxotobElCQoK7rp979uyxlStXBreZN2+eHTt2zI1VAQAAyPAsHp2vZPPmzWEDY1evXu3GkFSpUsW6detmzzzzjFWvXt0Flt69e7uZOTfccIPbvmbNmnb11Vdbx44d3VTkI0eOWNeuXd0MH2bwAACATAWUFStW2OWXXx68/vDDD7uf7dq1s0mTJlnPnj3duVJ0XhO1lDRp0sRNKy5SpEjwb6ZOnepCSdOmTd3sndatW7tzpwAAAGQqoFx22WXufCcp0dlln376aXdJiVpb3njjDd4BAACQe2fxAACA/IWAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAA8n5A+fvvv613795WrVo1K1q0qJ155pnWv39/CwQCwW30e58+fezUU0912zRr1sw2bdoU66IAAIBcKuYB5dlnn7UxY8bYSy+9ZOvXr3fXhwwZYiNHjgxuo+sjRoywsWPH2tKlS61YsWLWvHlzO3jwYKyLAwAAcqG4WD/g4sWLrVWrVtaiRQt3vWrVqvbmm2/asmXLgq0nw4cPtyeffNJtJ1OmTLHy5cvbjBkzrE2bNrEuEgAAyO8tKI0aNbK5c+faxo0b3fU1a9bYokWL7JprrnHXt2zZYjt37nTdOp6SJUtagwYNLDExMepjHjp0yJKSksIuAAAg74p5C8rjjz/uAkSNGjXshBNOcGNSBgwYYG3btnX3K5yIWkxC6bp3X6RBgwZZv379Yl1UAACQX1pQ3nnnHZs6daq98cYbtmrVKps8ebI9//zz7mdm9erVy/bu3Ru8bN++PaZlBgAAebwFpUePHq4VxRtLUqtWLdu6datrBWnXrp1VqFDB3b5r1y43i8ej6xdeeGHUx4yPj3cXAACQP8S8BeXAgQNWsGD4w6qr59ixY+53TT9WSNE4FY+6hDSbJyEhIdbFAQAAuVDMW1BatmzpxpxUqVLFzjvvPPvqq69s6NChdvfdd7v7CxQoYN26dbNnnnnGqlev7gKLzptSsWJFu+GGG2JdHAAAkAvFPKDofCcKHJ07d7bdu3e74HHvvfe6E7N5evbsafv377dOnTrZnj17rEmTJjZ79mwrUqRIrIsDAAByoZgHlOLFi7vznOiSErWiPP300+4CAAAQibV4AACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAAC
A7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAABA/ggoO3bssNtvv93KlCljRYsWtVq1atmKFSuC9wcCAevTp4+deuqp7v5mzZrZpk2bsqMoAAAgF4p5QPnjjz+scePGVqhQIfv4449t3bp19sILL1ipUqWC2wwZMsRGjBhhY8eOtaVLl1qxYsWsefPmdvDgwVgXBwAA5EJxsX7AZ5991ipXrmwTJ04M3latWrWw1pPhw4fbk08+aa1atXK3TZkyxcqXL28zZsywNm3axLpIAAAgv7egzJw50+rVq2c333yzlStXzi666CIbP3588P4tW7bYzp07XbeOp2TJktagQQNLTEyM+piHDh2ypKSksAsAAMi7Yh5QfvjhBxszZoxVr17dPvnkE7v//vvtwQcftMmTJ7v7FU5ELSahdN27L9KgQYNciPEuaqEBAAB5V8wDyrFjx6xOnTo2cOBA13rSqVMn69ixoxtvklm9evWyvXv3Bi/bt2+PaZkBAEAeDyiamXPuueeG3VazZk3btm2b+71ChQru565du8K20XXvvkjx8fFWokSJsAsAAMi7Yh5QNINnw4YNYbdt3LjRTj/99OCAWQWRuXPnBu/XmBLN5klISIh1cQAAQC4U81k83bt3t0aNGrkunltuucWWLVtm48aNcxcpUKCAdevWzZ555hk3TkWBpXfv3laxYkW74YYbYl0cAACQC8U8oNSvX9+mT5/uxo08/fTTLoBoWnHbtm2D2/Ts2dP279/vxqfs2bPHmjRpYrNnz7YiRYrEujgAACAXinlAkeuuu85dUqJWFIUXXQAAACKxFg8AAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAMh/AWXw4MFWoEAB69atW/C2gwcPWpcuXaxMmTJ20kknWevWrW3Xrl3ZXRQAAJBLZGtAWb58ub388st2wQUXhN3evXt3+/DDD23atGm2YMEC++mnn+ymm27KzqIAAIBcJNsCyr59+6xt27Y2fvx4K1WqVPD2vXv32oQJE2zo0KF2xRVXWN26dW3ixIm2ePFiW7JkSXYVBwAA5CLZFlDUhdOiRQtr1qxZ2O0rV660I0eOhN1eo0YNq1KliiUmJkZ9rEOHDllSUlLYBQAA5F1x2fGgb731lq1atcp18UTauXOnFS5c2E4++eSw28uXL+/ui2bQoEHWr1+/7CgqAADIDy0o27dvt4ceesimTp1qRYoUiclj9urVy3UNeRc9BwAAyLtiHlDUhbN7926rU6eOxcXFuYsGwo4YMcL9rpaSw4cP2549e8L+TrN4KlSoEPUx4+PjrUSJEmEXAACQd8W8i6dp06b2zTffhN3Wvn17N87kscces8qVK1uhQoVs7ty5bnqxbNiwwbZt22YJCQmxLg4AAMiFYh5Qihcvbueff37YbcWKFXPnPPFu79Chgz388MNWunRp1xrywAMPuHDSsGHDWBcHAADkQtkySDYtw4YNs4IFC7oWFM3Qad68uY0ePTonigIAAPJrQPn888/Drmvw7KhRo9wFAAAgEmvxAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yG
gAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAACAvB9QBg0aZPXr17fixYtbuXLl7IYbbrANGzaEbXPw4EHr0qWLlSlTxk466SRr3bq17dq1K9ZFAQAAuVTMA8qCBQtc+FiyZInNmTPHjhw5YldddZXt378/uE337t3tww8/tGnTprntf/rpJ7vppptiXRQAAJBLxcX6AWfPnh12fdKkSa4lZeXKlXbJJZfY3r17bcKECfbGG2/YFVdc4baZOHGi1axZ04Wahg0bxrpIAAAgl8n2MSgKJFK6dGn3U0FFrSrNmjULblOjRg2rUqWKJSYmZndxAABAfmxBCXXs2DHr1q2bNW7c2M4//3x3286dO61w4cJ28sknh21bvnx5d180hw4dchdPUlJSdhYbAADk5RYUjUVZu3atvfXWW1keeFuyZMngpXLlyjErIwAAyEcBpWvXrjZr1iybP3++VapUKXh7hQoV7PDhw7Znz56w7TWLR/dF06tXL9dV5F22b9+eXcUGAAB5MaAEAgEXTqZPn27z5s2zatWqhd1ft25dK1SokM2dOzd4m6Yhb9u2zRISEqI+Znx8vJUoUSLsAgAA8q647OjW0QydDz74wJ0LxRtXoq6ZokWLup8dOnSwhx9+2A2cVdh44IEHXDhhBg8AAMiWgDJmzBj387LLLgu7XVOJ77rrLvf7sGHDrGDBgu4EbRr82rx5cxs9ejTvCAAAyJ6Aoi6etBQpUsRGjRrlLgAAAJFYiwcAAPgOAQUAAPgOAQUAAPgOAQUAAOSvU93j+Ej8/rd0bNPf/CLhzDJpbtP5ws7HpSwAAH+iBQUAAPgOAQUAAPgOAQUAAPgOAQUAAPgOg2TzmEpJK9PcpvaiX5PdlhR/app/t7DpBZkuFwAAGUELCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8J24nC4AkCvMH5TmJr/MWp38xqr/SPPvTnmga2ZLBQB5Fi0oAADAdwgoAADAdwgoAADAdwgoAADAdxgkC6Rh2JyN1nDbb6luM7PgZqu959dktyetX5Dm4y987w+LpYQzy6S5TecLO8f0OYH8/h2RllVJb5ufJOSC74kcbUEZNWqUVa1a1YoUKWINGjSwZcuW5WRxAACAT+RYQHn77bft4Ycftr59+9qqVausdu3a1rx5c9u9e3dOFQkAAOT3gDJ06FDr2LGjtW/f3s4991wbO3asnXjiifbqq6/mVJEAAEB+HoNy+PBhW7lypfXq1St4W8GCBa1Zs2aWmJiYbPtDhw65i2fv3r3uZ1JSUraU7+D+fWHX9/91KHmZCh4Ju37g8NFk2/xVIHybaA4fOGixdOivtJ8zp8v6176/0twmu97bzH4eon0GIj8POb1fc+v+BXK7yDrjePw/z63fE95jBgKBtDcO5IAdO3aoZIHFixeH3d6jR4/AxRdfnGz7vn37uu25cOHChQsXLpbrL9u3b08zK+SKWTxqadF4Fc+xY8fs999/tzJlyliBAgWylOQqV65s27dvtxIlSsSotAjFPj4+2M/Zj32c/djHeX8/BwIB+/PPP61ixYppbpsjAaVs2bJ2wgkn2K5du8Ju1/UKFSok2z4+Pt5dQp188skxK4/eIP4zZC/
28fHBfs5+7OPsxz4+PnJqP5csWdK/g2QLFy5sdevWtblz54a1iuh6QkJCThQJAAD4SI518ajLpl27dlavXj27+OKLbfjw4bZ//343qwcAAORvORZQbr31Vvvll1+sT58+tnPnTrvwwgtt9uzZVr58+eNWBnUb6Twskd1HiB328fHBfs5+7OPsxz4+PuJzyX4uoJGyOV0IAACAUCwWCAAAfIeAAgAAfIeAAgAAfIeAAgAAfCdfB5RRo0ZZ1apVrUiRItagQQNbtmxZThcp1xo0aJDVr1/fihcvbuXKlbMbbrjBNmzYELbNwYMHrUuXLu4MwCeddJK1bt062cn6kH6DBw92Z1Lu1q1b8Db2cdbt2LHDbr/9drcPixYtarVq1bIVK1YE79e8As0+PPXUU939WkNs06ZNOVrm3Obvv/+23r17W7Vq1dw+PPPMM61///5h67OwnzPmiy++sJYtW7oztOp7YcaMGWH3p2d/6gztbdu2dSdv08lQO3ToYPv2pb3OULYJ5FNvvfVWoHDhwoFXX3018O233wY6duwYOPnkkwO7du3K6aLlSs2bNw9MnDgxsHbt2sDq1asD1157baBKlSqBffv2Bbe57777ApUrVw7MnTs3sGLFikDDhg0DjRo1ytFy51bLli0LVK1aNXDBBRcEHnrooeDt7OOs+f333wOnn3564K677gosXbo08MMPPwQ++eSTwObNm4PbDB48OFCyZMnAjBkzAmvWrAlcf/31gWrVqgX++uuvHC17bjJgwIBAmTJlArNmzQps2bIlMG3atMBJJ50UePHFF4PbsJ8z5qOPPgo88cQTgffff9+tdTN9+vSw+9OzP6+++upA7dq1A0uWLAksXLgwcNZZZwVuu+22QE7JtwFFixJ26dIleP3vv/8OVKxYMTBo0KAcLVdesXv3bvefZMGCBe76nj17AoUKFXJfRJ7169e7bRITE3OwpLnPn3/+GahevXpgzpw5gUsvvTQYUNjHWffYY48FmjRpkuL9x44dC1SoUCHw3HPPBW/Tfo+Pjw+8+eabx6mUuV+LFi0Cd999d9htN910U6Bt27bud/Zz1kQGlPTsz3Xr1rm/W758eXCbjz/+OFCgQAG3wG9OyJddPIcPH7aVK1e6Ji5PwYIF3fXExMQcLVtesXfvXvezdOnS7qf295EjR8L2eY0aNaxKlSrs8wxSF06LFi3C9qWwj7Nu5syZ7uzWN998s+uqvOiii2z8+PHB+7ds2eJOLBm6j7WuiLqI2cfp16hRI7e0ycaNG931NWvW2KJFi+yaa65x19nPsZWe/amf6tbR59+j7VU3Ll26NEfKnStWM461X3/91fWBRp61Vte/++67HCtXXqF1lTQuonHjxnb++ee72/SfQ2swRS7yqH2u+5A+b731lq1atcqWL1+e7D72cdb98MMPNmbMGLcUx7///W+3nx988EG3X7U0h7cfo313sI/T7/HHH3cr6ipAa+FYfR8PGDDAjX8Q9nNspWd/6qdCeai4uDh3kJlT+zxfBhRk/xH+2rVr3RERYkdLoz/00EM2Z84cN7Ab2ROudQQ5cOBAd10tKPosjx071gUUxMY777xjU6dOtTfeeMPOO+88W716tTuo0QBP9jM8+bKLp2zZsi61R85u0PUKFSrkWLnygq5du9qsWbNs/vz5VqlSpeDt2q/qWtuzZ0/Y9uzz9FMXzu7du61OnTruyEaXBQsW2IgRI9zvOhpiH2eNZjice+65YbfVrFnTtm3b5n739iPfHVnTo0cP14rSpk0bN0vqjjvusO7du7vZgMJ+jq307E/91PdLqKNHj7qZPTm1z/NlQFFzbd26dV0faOiRk64nJCTkaNlyK43LUjiZPn26zZs3z00fDKX9XahQobB9rmnI+uJnn6dP06ZN7ZtvvnFHm95FR/tqFvd+Zx9njbolI6fHa5zE6aef7n7X51pf1qH7WF0V6qNnH6ffgQMH3NiGUDpo1PewsJ9jKz37Uz91cKM
DIY++y/WeaKxKjgjk42nGGsE8adIkN3q5U6dObprxzp07c7poudL999/vprB9/vnngZ9//jl4OXDgQNgUWE09njdvnpsCm5CQ4C7IvNBZPMI+zvr07bi4ODcNdtOmTYGpU6cGTjzxxMDrr78eNl1T3xUffPBB4Ouvvw60atWK6a8Z1K5du8Bpp50WnGasqbFly5YN9OzZM7gN+znjs/u++uord1HVPnToUPf71q1b070/Nc34oosuclPsFy1a5GYLMs04h4wcOdJ9met8KJp2rLnfyBz9h4h20blRPPqP0Llz50CpUqXcl/6NN97oQgxiF1DYx1n34YcfBs4//3x3AFOjRo3AuHHjwu7XlM3evXsHypcv77Zp2rRpYMOGDTlW3twoKSnJfW71/VukSJHAGWec4c7hcejQoeA27OeMmT9/ftTvYIXB9O7P3377zQUSnZOmRIkSgfbt27vgk1MK6J+cabsBAACILl+OQQEAAP5GQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAOY3/w9bLw3+tnymjQAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import neps\n", + "from neps import algorithms\n", + "from functools import partial\n", + "import matplotlib.pyplot as plt\n", + "global_values = []\n", + "eta=3\n", + "for algo in [partial(algorithms.neps_hyperband, eta=eta), \n", + " partial(algorithms.hyperband, eta=eta), \n", + " partial(algorithms.neps_hyperband, sampler=\"prior\", eta=eta), \n", + " partial(algorithms.hyperband, sampler=\"prior\", eta=eta)]:\n", + " neps.run(\n", + " evaluate_pipeline,\n", + " SimpleSpace(),\n", + " root_directory=\"neps_test_runs/algo_tests\",\n", + " overwrite_root_directory=True,\n", + " optimizer=algo,\n", + " fidelities_to_spend=600\n", + " )\n", + "\n", + "plt.hist([v for n,v in enumerate(global_values) if n % 4 == 0], alpha=0.5, label='Neps HB with uniform sampler',bins=10)\n", + "plt.hist([v+1 for n,v in enumerate(global_values) if n % 4 == 1], alpha=0.5, label='HB with uniform sampler',bins=10)\n", + "plt.hist([v+2 for n,v in enumerate(global_values) if n % 4 == 2], alpha=0.5, label='Neps HB with prior sampler',bins=10)\n", + "plt.hist([v+3 for n,v in enumerate(global_values) if n % 4 == 3], alpha=0.5, label='HB with prior sampler',bins=10)\n", + "plt.legend()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "70b97bfb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Count of 1 in algo 0: 82\n", + "Count of 3 in algo 0: 52\n", + "Count of 11 in algo 0: 16\n", + "Count of 33 in algo 0: 2\n", + "Count of 1 in algo 1: 80\n", + "Count of 3 in algo 1: 56\n", + "Count of 11 in algo 1: 14\n", + "Count of 33 in algo 1: 4\n", + "Count of 1 in algo 2: 82\n", + "Count of 3 in algo 2: 52\n", + "Count of 11 in algo 2: 16\n", + "Count of 33 in algo 2: 2\n", + "Count of 1 in algo 3: 80\n", + "Count of 3 in algo 3: 56\n", + "Count of 11 in algo 3: 14\n", + "Count of 33 in algo 3: 4\n" + ] + } + ], + 
"source": [ + "for i in range(4):\n", + " for j in [v for v in range(100) if v in global_values]:\n", + " print(f\"Count of {j:<3} in algo {i}: \", [v for n,v in enumerate(global_values) if n % 4 == i].count(j))\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "neural-pipeline-search (3.13.1)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/neps_examples/basic_usage/priors_test.ipynb b/neps_examples/basic_usage/priors_test.ipynb index f85ebf414..7a4dd4b9d 100644 --- a/neps_examples/basic_usage/priors_test.ipynb +++ b/neps_examples/basic_usage/priors_test.ipynb @@ -17,37 +17,23 @@ "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", "\n", "==================================================\n", - "After adding new float:\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param1 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.LOW)\n", - "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tparam_4 = Float(0.0, 1.0)\n", - "\n", - "==================================================\n", - "After removing 'int_param1':\n", + "After removing 'int_param1' (in-place):\n", "PipelineSpace SimpleSpace with parameters:\n", "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tparam_4 = Float(0.0, 1.0)\n", "\n", "==================================================\n", - "After adding 'int_param1' twice and once with different upper:\n", - "Error 
occurred: A different parameter with the name 'int_param1' already exists in the pipeline:\n", - " Float(0.0, 1.0)\n", - " Float(0.0, 2.0)\n", + "After adding 'int_param1' (in-place):\n", "PipelineSpace SimpleSpace with parameters:\n", "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tparam_4 = Float(0.0, 1.0)\n", - "\tint_param1 = Float(0.0, 1.0)\n", + "\tint_param1 = Float(0.0, 1.0, prior=, prior_confidence=)\n", "\n", "==================================================\n", - "After removing 'int_param1':\n", + "After removing 'int_param1' (in-place):\n", "PipelineSpace SimpleSpace with parameters:\n", "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tparam_4 = Float(0.0, 1.0)\n" + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n" ] } ], @@ -67,34 +53,24 @@ "class OtherSpace(PipelineSpace):\n", " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\", log=False)\n", "\n", - "# Test operations\n", + "# Test in-place operations\n", "pipeline = SimpleSpace()\n", "print(\"Original pipeline:\")\n", "print(pipeline)\n", "\n", "print(\"\\n\" + \"=\"*50)\n", - "print(\"After adding new float:\")\n", - "pipeline=pipeline+neps.Float(0.0, 1.0) \n", - "print(pipeline)\n", - "\n", - "print(\"\\n\" + \"=\"*50)\n", - "print(\"After removing 'int_param1':\")\n", - "pipeline=pipeline.remove(\"int_param1\") \n", + "print(\"After removing 'int_param1' (in-place):\")\n", + "pipeline.remove(\"int_param1\") # This modifies pipeline in-place\n", "print(pipeline)\n", "\n", "print(\"\\n\" + \"=\"*50)\n", - "print(\"After adding 'int_param1' twice and once with different upper:\")\n", - "pipeline=pipeline.add(neps.Float(0.0, 1.0), \"int_param1\")\n", - 
"pipeline=pipeline.add(neps.Float(0.0, 1.0), \"int_param1\")\n", - "try:\n", - " pipeline=pipeline.add(neps.Float(0.0, 2.0), \"int_param1\")\n", - "except ValueError as e:\n", - " print(f\"Error occurred: {e}\")\n", + "print(\"After adding 'int_param1' (in-place):\")\n", + "pipeline.add(neps.Float(0.0, 1.0), \"int_param1\") # This also modifies in-place\n", "print(pipeline)\n", "\n", "print(\"\\n\" + \"=\"*50)\n", - "print(\"After removing 'int_param1':\")\n", - "pipeline=pipeline.remove(\"int_param1\")\n", + "print(\"After removing 'int_param1' (in-place):\")\n", + "pipeline.remove(\"int_param1\") # This modifies pipeline in-place\n", "print(pipeline)" ] }, From 6f7011d683079c1d23621ca9007c73ab2fbb3b17 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 12 Oct 2025 00:27:29 +0200 Subject: [PATCH 077/156] feat: Enhance DefaultWorker to return cumulative metrics and add add_prior method in PipelineSpace --- neps/runtime.py | 123 +++++++++++++++++---- neps/space/neps_spaces/parameters.py | 55 +++++++++ neps_examples/basic_usage/algo_tests.ipynb | 43 +++---- 3 files changed, 179 insertions(+), 42 deletions(-) diff --git a/neps/runtime.py b/neps/runtime.py index 99b06be45..9c68220f0 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -328,10 +328,12 @@ def _check_shared_error_stopping_criterion(self) -> str | Literal[False]: def _check_global_stopping_criterion( # noqa: C901 self, trials: Mapping[str, Trial], - ) -> str | Literal[False]: + ) -> tuple[str | Literal[False], dict[str, float | int]]: + return_dict: dict[str, float | int] = {} + return_string: str | Literal[False] = False if self.settings.evaluations_to_spend is not None: if self.settings.include_in_progress_evaluations_towards_maximum: - count = sum( + count_evals = sum( 1 for _, trial in trials.items() if trial.metadata.state @@ -339,10 +341,12 @@ def _check_global_stopping_criterion( # noqa: C901 ) else: # This indicates they have completed. 
- count = sum(1 for _, trial in trials.items() if trial.report is not None) - - if count >= self.settings.evaluations_to_spend: - return ( + count_evals = sum( + 1 for _, trial in trials.items() if trial.report is not None + ) + return_dict["cumulative_evaluations"] = count_evals + if count_evals >= self.settings.evaluations_to_spend: + return_string = ( "The total number of evaluations has reached the maximum allowed of" f" `{self.settings.evaluations_to_spend=}`." " To allow more evaluations, increase this value or use a different" @@ -359,13 +363,14 @@ def _check_global_stopping_criterion( # noqa: C901 fidelity_name = ( f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" ) - count = sum( + count_fidelities = sum( trial.config[fidelity_name] for _, trial in trials.items() if trial.report is not None and trial.config[fidelity_name] is not None ) - if count >= self.settings.fidelities_to_spend: - return ( + return_dict["cumulative_fidelities"] = count_fidelities + if count_fidelities >= self.settings.fidelities_to_spend: + return_string = ( "The total number of fidelity evaluations has reached the maximum" f" allowed of `{self.settings.fidelities_to_spend=}`." " To allow more evaluations, increase this value or use a different" @@ -378,8 +383,9 @@ def _check_global_stopping_criterion( # noqa: C901 for _, trial in trials.items() if trial.report is not None and trial.report.cost is not None ) + return_dict["cumulative_cost"] = cost if cost >= self.settings.cost_to_spend: - return ( + return_string = ( f"The maximum cost `{self.settings.cost_to_spend=}` has been" " reached by all of the evaluated trials. To allow more evaluations," " increase this value or use a different stopping criterion." 
@@ -392,15 +398,16 @@ def _check_global_stopping_criterion( # noqa: C901 if trial.report is not None if trial.report.evaluation_duration is not None ) + return_dict["cumulative_time"] = time_spent if time_spent >= self.settings.max_evaluation_time_total_seconds: - return ( + return_string = ( "The maximum evaluation time of" f" `{self.settings.max_evaluation_time_total_seconds=}` has been" " reached. To allow more evaluations, increase this value or use" " a different stopping criterion." ) - return False + return (return_string, return_dict) @property def _requires_global_stopping_criterion(self) -> bool: @@ -428,7 +435,7 @@ def _get_next_trial(self) -> Trial | Literal["break"]: trials = self.state._trial_repo.latest() if self._requires_global_stopping_criterion: - should_stop = self._check_global_stopping_criterion(trials) + should_stop = self._check_global_stopping_criterion(trials)[0] if should_stop is not False: logger.info(should_stop) return "break" @@ -729,13 +736,26 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 if self.settings.write_summary_to_disk: # Store in memory for later file re-writing - self.state.all_best_configs.append( - { - "score": self.state.new_score, - "trial_id": evaluated_trial.id, - "config": evaluated_trial.config, - } - ) + global_stopping_criterion = self._check_global_stopping_criterion( + self.state._trial_repo.latest() + )[1] + + config_dict = { + "score": self.state.new_score, + "trial_id": evaluated_trial.id, + "config": evaluated_trial.config, + } + if report.cost is not None: + config_dict["cost"] = report.cost + for metric in ( + "cumulative_evaluations", + "cumulative_fidelities", + "cumulative_cost", + "cumulative_time", + ): + if metric in global_stopping_criterion: + config_dict[metric] = global_stopping_criterion[metric] + self.state.all_best_configs.append(config_dict) # Build trace text and best config text trace_text = ( @@ -743,11 +763,41 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 + "-" * 80 
+ "\n" ) + for best in self.state.all_best_configs: trace_text += ( - f"Objective to minimize: {best['score']}\n" f"Config ID: {best['trial_id']}\n" - f"Config: {best['config']}\n" + "-" * 80 + "\n" + f"Objective to minimize: {best['score']}\n" + + ( + f"Cost: {best.get('cost', 0)}\n" + if "cost" in best + else "" + ) + + ( + "Cumulative evaluations:" + f" {best.get('cumulative_evaluations', 0)}\n" + if "cumulative_evaluations" in best + else "" + ) + + ( + "Cumulative fidelities:" + f" {best.get('cumulative_fidelities', 0)}\n" + if "cumulative_fidelities" in best + else "" + ) + + ( + f"Cumulative cost: {best.get('cumulative_cost', 0)}\n" + if "cumulative_cost" in best + else "" + ) + + ( + f"Cumulative time: {best.get('cumulative_time', 0)}\n" + if "cumulative_time" in best + else "" + ) + + f"Config: {best['config']}\n" + + "-" * 80 + + "\n" ) best_config = self.state.all_best_configs[-1] # Latest best @@ -755,7 +805,34 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 "# Best config:" f"\n\n Config ID: {best_config['trial_id']}" f"\n Objective to minimize: {best_config['score']}" - f"\n Config: {best_config['config']}" + + ( + f"\n Cost: {best_config['cost']}" + if "cost" in best_config + else "" + ) + + ( + "\n Cumulative evaluations:" + f" {best_config['cumulative_evaluations']}" + if "cumulative_evaluations" in best_config + else "" + ) + + ( + "\n Cumulative fidelities:" + f" {best_config['cumulative_fidelities']}" + if "cumulative_fidelities" in best_config + else "" + ) + + ( + f"\n Cumulative cost: {best_config['cumulative_cost']}" + if "cumulative_cost" in best_config + else "" + ) + + ( + f"\n Cumulative time: {best_config['cumulative_time']}" + if "cumulative_time" in best_config + else "" + ) + + f"\n Config: {best_config['config']}" ) # Write files from scratch diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 380a92ecf..14de28dbe 100644 --- a/neps/space/neps_spaces/parameters.py +++ 
b/neps/space/neps_spaces/parameters.py @@ -326,6 +326,61 @@ class NewSpace(PipelineSpace): return new_pipeline + def add_prior( + self, + parameter_name: str, + prior: Any, + prior_confidence: ConfidenceLevel | Literal["low", "medium", "high"], + ) -> PipelineSpace: + """Add a prior to a parameter in the pipeline. This is NOT an in-place operation. + + Args: + parameter_name: The name of the parameter to which the prior will be added. + prior: The value of the prior to be added. + prior_confidence: The confidence level of the prior, which can be "low", + "medium", or "high". + + Returns: + A NEW PipelineSpace with the added prior. + + Raises: + ValueError: If no parameter with the specified name exists in the pipeline + or if the parameter type does not support priors. + """ + if parameter_name not in self.get_attrs(): + raise ValueError( + f"No parameter with the name {parameter_name!r} exists in the pipeline." + ) + + class NewSpace(PipelineSpace): + pass + + NewSpace.__name__ = self.__class__.__name__ + new_pipeline = NewSpace() + for exist_name, value in self.get_attrs().items(): + if exist_name == parameter_name: + if isinstance(value, Integer | Float | Categorical): + if value.has_prior: + raise ValueError( + f"The parameter {parameter_name!r} already has a prior:" + f" {value.prior!r}." + ) + if isinstance(prior_confidence, str): + prior_confidence = convert_confidence_level(prior_confidence) + old_attributes = dict(value.get_attrs()) + old_attributes["prior"] = prior + old_attributes["prior_confidence"] = prior_confidence + new_value = value.from_attrs(attrs=old_attributes) + else: + raise ValueError( + f"The parameter {parameter_name!r} is of type" + f" {type(value).__name__}, which does not support priors." 
+ ) + else: + new_value = value + setattr(new_pipeline, exist_name, new_value) + return new_pipeline + class ConfidenceLevel(enum.Enum): """Enum representing confidence levels for sampling.""" diff --git a/neps_examples/basic_usage/algo_tests.ipynb b/neps_examples/basic_usage/algo_tests.ipynb index 9a913f13b..0564f4928 100644 --- a/neps_examples/basic_usage/algo_tests.ipynb +++ b/neps_examples/basic_usage/algo_tests.ipynb @@ -2,10 +2,19 @@ "cells": [ { "cell_type": "code", - "execution_count": 11, + "execution_count": 1, "id": "938adc12", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "c:\\Users\\Amega\\Git\\neps\\.venv\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], "source": [ "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled, Integer, Fidelity\n", "import neps\n", @@ -20,35 +29,30 @@ "def evaluate_pipeline(int_param1, int_param2, *args, **kwargs):\n", " # Dummy evaluation function\n", " global_values.append(int_param1)\n", - " return - int_param2/50 +int_param1" + " return {\"objective_to_minimize\": -int_param2/50 + int_param1,\n", + " \"cost\": int_param1}" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 2, "id": "89427fd0", "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Grid search does not support priors, they will be ignored.\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ - "# Configs: 40\n", + "# Configs: 20\n", "\n", - " success: 40\n", + " success: 20\n", "\n", - "# Best Found (config 30):\n", + "# Best Found (config 17_rung_0):\n", "\n", - " objective_to_minimize: 0.48\n", - " config: {'SAMPLING__Resolvable.int_param2::integer__1_100_False': 26.0, 
'SAMPLING__Resolvable.int_param3::integer__1_100_False': 1.0, 'SAMPLING__Resolvable.cat::categorical__3': 0, 'ENVIRONMENT__int_param1': 1}\n", - " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\neps_test_runs\\algo_tests\\configs\\config_30\n", + " objective_to_minimize: -1.0\n", + " config: {'int_param2': 100, 'int_param3': 54, 'cat': 'option3', 'int_param1': 1}\n", + " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\neps_test_runs\\algo_tests\\configs\\config_17_rung_0\n", + " cost: 1.0\n", "\n" ] } @@ -65,8 +69,9 @@ " SimpleSpace(),\n", " root_directory=\"neps_test_runs/algo_tests\",\n", " overwrite_root_directory=True,\n", - " optimizer=partial(neps.algorithms.neps_grid_search, ignore_fidelity=True, size_per_numerical_dimension=5),\n", - " evaluations_to_spend=40\n", + " optimizer=partial(neps.algorithms.hyperband),#, ignore_fidelity=True, size_per_numerical_dimension=5),\n", + " # fidelities_to_spend=40,\n", + " cost_to_spend=20\n", ")\n", "neps.status(\"neps_test_runs/algo_tests\",print_summary=True)\n", "print()" From 97be1004c296d86da25aee07dcccc42f7667c3ce Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 13 Oct 2025 23:25:59 +0200 Subject: [PATCH 078/156] feat: Refactor trace text generation into a reusable function for better readability and maintainability --- neps/runtime.py | 197 ++++++++++++++++++++++-------------------------- 1 file changed, 92 insertions(+), 105 deletions(-) diff --git a/neps/runtime.py b/neps/runtime.py index 9c68220f0..2744c0829 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -59,6 +59,83 @@ logger = logging.getLogger(__name__) +def _build_trace_texts(best_configs: list[dict]) -> tuple[str, str]: + """Build trace text and best config text from a list of best configurations. + + Args: + best_configs: List of best configuration dictionaries containing + 'trial_id', 'score', 'config', and optional metrics. + + Returns: + Tuple of (trace_text, best_config_text) strings. 
+ """ + trace_text = ( + "Best configs and their objectives across evaluations:\n" + "-" * 80 + "\n" + ) + + for best in best_configs: + trace_text += ( + f"Config ID: {best['trial_id']}\nObjective to minimize: {best['score']}\n" + + (f"Cost: {best.get('cost', 0)}\n" if "cost" in best else "") + + ( + f"Cumulative evaluations: {best.get('cumulative_evaluations', 0)}\n" + if "cumulative_evaluations" in best + else "" + ) + + ( + f"Cumulative fidelities: {best.get('cumulative_fidelities', 0)}\n" + if "cumulative_fidelities" in best + else "" + ) + + ( + f"Cumulative cost: {best.get('cumulative_cost', 0)}\n" + if "cumulative_cost" in best + else "" + ) + + ( + f"Cumulative time: {best.get('cumulative_time', 0)}\n" + if "cumulative_time" in best + else "" + ) + + f"Config: {best['config']}\n" + + "-" * 80 + + "\n" + ) + + best_config_text = "" + if best_configs: + best_config = best_configs[-1] # Latest best + best_config_text = ( + "# Best config:" + f"\n\n Config ID: {best_config['trial_id']}" + f"\n Objective to minimize: {best_config['score']}" + + (f"\n Cost: {best_config['cost']}" if "cost" in best_config else "") + + ( + f"\n Cumulative evaluations: {best_config['cumulative_evaluations']}" + if "cumulative_evaluations" in best_config + else "" + ) + + ( + f"\n Cumulative fidelities: {best_config['cumulative_fidelities']}" + if "cumulative_fidelities" in best_config + else "" + ) + + ( + f"\n Cumulative cost: {best_config['cumulative_cost']}" + if "cumulative_cost" in best_config + else "" + ) + + ( + f"\n Cumulative time: {best_config['cumulative_time']}" + if "cumulative_time" in best_config + else "" + ) + + f"\n Config: {best_config['config']}" + ) + + return trace_text, best_config_text + + _DDP_ENV_VAR_NAME = "NEPS_DDP_TRIAL_ID" @@ -757,82 +834,9 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 config_dict[metric] = global_stopping_criterion[metric] self.state.all_best_configs.append(config_dict) - # Build trace text and best config text - 
trace_text = ( - "Best configs and their objectives across evaluations:\n" - + "-" * 80 - + "\n" - ) - - for best in self.state.all_best_configs: - trace_text += ( - f"Config ID: {best['trial_id']}\n" - f"Objective to minimize: {best['score']}\n" - + ( - f"Cost: {best.get('cost', 0)}\n" - if "cost" in best - else "" - ) - + ( - "Cumulative evaluations:" - f" {best.get('cumulative_evaluations', 0)}\n" - if "cumulative_evaluations" in best - else "" - ) - + ( - "Cumulative fidelities:" - f" {best.get('cumulative_fidelities', 0)}\n" - if "cumulative_fidelities" in best - else "" - ) - + ( - f"Cumulative cost: {best.get('cumulative_cost', 0)}\n" - if "cumulative_cost" in best - else "" - ) - + ( - f"Cumulative time: {best.get('cumulative_time', 0)}\n" - if "cumulative_time" in best - else "" - ) - + f"Config: {best['config']}\n" - + "-" * 80 - + "\n" - ) - - best_config = self.state.all_best_configs[-1] # Latest best - best_config_text = ( - "# Best config:" - f"\n\n Config ID: {best_config['trial_id']}" - f"\n Objective to minimize: {best_config['score']}" - + ( - f"\n Cost: {best_config['cost']}" - if "cost" in best_config - else "" - ) - + ( - "\n Cumulative evaluations:" - f" {best_config['cumulative_evaluations']}" - if "cumulative_evaluations" in best_config - else "" - ) - + ( - "\n Cumulative fidelities:" - f" {best_config['cumulative_fidelities']}" - if "cumulative_fidelities" in best_config - else "" - ) - + ( - f"\n Cumulative cost: {best_config['cumulative_cost']}" - if "cumulative_cost" in best_config - else "" - ) - + ( - f"\n Cumulative time: {best_config['cumulative_time']}" - if "cumulative_time" in best_config - else "" - ) - + f"\n Config: {best_config['config']}" + # Build trace text and best config text using shared function + trace_text, best_config_text = _build_trace_texts( + self.state.all_best_configs ) # Write files from scratch @@ -875,35 +879,18 @@ def load_incumbent_trace( # noqa: D103 state.new_score = 
evaluated_trial.report.objective_to_minimize if state.new_score is not None and state.new_score < _best_score_so_far: _best_score_so_far = state.new_score - state.all_best_configs.append( - { - "score": state.new_score, - "trial_id": evaluated_trial.metadata.id, - "config": evaluated_trial.config, - } - ) - - trace_text = ( - "Best configs and their objectives across evaluations:\n" + "-" * 80 + "\n" - ) - for best in state.all_best_configs: - trace_text += ( - f"Objective to minimize: {best['score']}\n" - f"Config ID: {best['trial_id']}\n" - f"Config: {best['config']}\n" + "-" * 80 + "\n" - ) - - best_config_text = "" - if state.all_best_configs: - best_config = state.all_best_configs[-1] - best_config_text = ( - "# Best config:" - f"\n\n Config ID: {best_config['trial_id']}" - f"\n Objective to minimize: {best_config['score']}" - f"\n Config: {best_config['config']}" - ) - else: - best_config = None + config_dict = { + "score": state.new_score, + "trial_id": evaluated_trial.metadata.id, + "config": evaluated_trial.config, + } + # Add cost if available + if evaluated_trial.report.cost is not None: + config_dict["cost"] = evaluated_trial.report.cost + state.all_best_configs.append(config_dict) + + # Use the shared function to build trace texts + trace_text, best_config_text = _build_trace_texts(state.all_best_configs) with _trace_lock: with improvement_trace_path.open(mode="w") as f: From a2c82a7edcf651b8331011c0c63d6c0a8f19a4eb Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 13 Oct 2025 23:49:15 +0200 Subject: [PATCH 079/156] feat: Refactor load_incumbent_trace to include cumulative metrics tracking and improve readability --- neps/runtime.py | 139 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 103 insertions(+), 36 deletions(-) diff --git a/neps/runtime.py b/neps/runtime.py index 2744c0829..a13e3790c 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -640,13 +640,14 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 previous_trials = 
self.state.lock_and_read_trials() if len(previous_trials): - load_incumbent_trace( + self.load_incumbent_trace( previous_trials, _trace_lock, self.state, self.settings, improvement_trace_path, best_config_path, + self.optimizer, ) _best_score_so_far = float("inf") @@ -860,43 +861,109 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 "Learning Curve %s: %s", evaluated_trial.id, report.learning_curve ) + def load_incumbent_trace( # noqa: C901 + self, + previous_trials: dict[str, Trial], + _trace_lock: FileLock, + state: NePSState, + settings: WorkerSettings, + improvement_trace_path: Path, + best_config_path: Path, + optimizer: AskFunction, + ) -> None: + """Load the incumbent trace from previous trials and update the state. + This function also computes cumulative metrics and updates the best + configurations. + + Args: + previous_trials (dict): A dictionary of previous trials. + _trace_lock (FileLock): A file lock to ensure thread-safe writing. + state (NePSState): The current NePS state. + settings (WorkerSettings): The worker settings. + improvement_trace_path (Path): Path to the improvement trace file. + best_config_path (Path): Path to the best configuration file. + optimizer (AskFunction): The optimizer used for sampling configurations. 
+ """ + _best_score_so_far = float("inf") -def load_incumbent_trace( # noqa: D103 - previous_trials: dict[str, Trial], - _trace_lock: FileLock, - state: NePSState, - settings: WorkerSettings, # noqa: ARG001 - improvement_trace_path: Path, - best_config_path: Path, -) -> None: - _best_score_so_far = float("inf") + metrics = { + "cumulative_evaluations": 0, + "cumulative_fidelities": 0.0, + "cumulative_cost": 0.0, + "cumulative_time": 0.0, + } + for evaluated_trial in previous_trials.values(): + if ( + evaluated_trial.report is not None + and evaluated_trial.report.objective_to_minimize is not None + ): + metrics["cumulative_evaluations"] += 1 + if ( + settings.cost_to_spend is not None + and evaluated_trial.report.cost is not None + ): + metrics["cumulative_cost"] += evaluated_trial.report.cost + if ( + ( + settings.max_evaluation_time_total_seconds is not None + or evaluated_trial.metadata.evaluation_duration is not None + or settings.max_evaluation_time_total_seconds is not None + ) + and evaluated_trial.metadata.time_started is not None + and evaluated_trial.metadata.time_end is not None + ): + metrics["cumulative_time"] += ( + evaluated_trial.metadata.time_end + - evaluated_trial.metadata.time_started + ) - for evaluated_trial in previous_trials.values(): - if ( - evaluated_trial.report is not None - and evaluated_trial.report.objective_to_minimize is not None - ): - state.new_score = evaluated_trial.report.objective_to_minimize - if state.new_score is not None and state.new_score < _best_score_so_far: - _best_score_so_far = state.new_score - config_dict = { - "score": state.new_score, - "trial_id": evaluated_trial.metadata.id, - "config": evaluated_trial.config, - } - # Add cost if available - if evaluated_trial.report.cost is not None: - config_dict["cost"] = evaluated_trial.report.cost - state.all_best_configs.append(config_dict) - - # Use the shared function to build trace texts - trace_text, best_config_text = _build_trace_texts(state.all_best_configs) - - 
with _trace_lock: - with improvement_trace_path.open(mode="w") as f: - f.write(trace_text) - with best_config_path.open(mode="w") as f: - f.write(best_config_text) + if hasattr(optimizer, "space"): + if not isinstance(optimizer.space, PipelineSpace): + fidelity_name = next(iter(optimizer.space.fidelities.keys())) + else: + fidelity_name = next(iter(optimizer.space.fidelity_attrs.keys())) + fidelity_name = ( + f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" + ) + if ( + fidelity_name in evaluated_trial.config + and evaluated_trial.config[fidelity_name] is not None + ): + metrics["cumulative_fidelities"] += evaluated_trial.config[ + fidelity_name + ] + + state.new_score = evaluated_trial.report.objective_to_minimize + if state.new_score is not None and state.new_score < _best_score_so_far: + _best_score_so_far = state.new_score + config_dict = { + "score": state.new_score, + "trial_id": evaluated_trial.metadata.id, + "config": evaluated_trial.config, + } + + # Add cost if available + if evaluated_trial.report.cost is not None: + config_dict["cost"] = evaluated_trial.report.cost + state.all_best_configs.append(config_dict) + + for metric in ( + "cumulative_evaluations", + "cumulative_fidelities", + "cumulative_cost", + "cumulative_time", + ): + if metrics[metric] > 0: + config_dict[metric] = metrics[metric] + + # Use the shared function to build trace texts + trace_text, best_config_text = _build_trace_texts(state.all_best_configs) + + with _trace_lock: + with improvement_trace_path.open(mode="w") as f: + f.write(trace_text) + with best_config_path.open(mode="w") as f: + f.write(best_config_text) def _save_results( From cb02744c6630c36f6cf09640eb309390b1fda5a6 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 14 Oct 2025 00:57:26 +0200 Subject: [PATCH 080/156] feat: Enhance SamplingResolutionContext with path-aware resolution methods to avoid using a cached resolved object instead of resolving when they are actually different --- 
neps/space/neps_spaces/neps_space.py | 56 ++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 3 deletions(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 9228c5a7c..f0e16eb61 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -297,6 +297,54 @@ def get_value_from_environment(self, var_name: str) -> Any: f"No value is available for the environment variable {var_name!r}." ) from err + def was_already_resolved_with_path(self, obj: Any) -> bool: + """Check if the given object was already resolved with current path. + + Args: + obj: The object to check if it was already resolved. + + Returns: + True if the object was already resolved with current path, False otherwise. + """ + current_path = ".".join(self._current_path_parts) + # Use object identity (id) instead of equality to ensure different instances + # are treated separately even if they're equal + cache_key = (id(obj), current_path) + return cache_key in self._resolved_objects + + def get_resolved_with_path(self, obj: Any) -> Any: + """Get the resolved value for the given object with current path. + + Args: + obj: The object for which to get the resolved value. + + Returns: + The resolved value of the object. + + Raises: + ValueError: If the object was not already resolved with current path. + """ + current_path = ".".join(self._current_path_parts) + cache_key = (id(obj), current_path) + try: + return self._resolved_objects[cache_key] + except KeyError as err: + raise ValueError( + f"Given object was not already resolved with path {current_path!r}:" + f" {obj!r}" + ) from err + + def add_resolved_with_path(self, original: Any, resolved: Any) -> None: + """Add a resolved object to the context with current path. + + Args: + original: The original object that was resolved. + resolved: The resolved value of the original object. 
+ """ + current_path = ".".join(self._current_path_parts) + cache_key = (id(original), current_path) + self._resolved_objects[cache_key] = resolved + class SamplingResolver: """A class responsible for resolving samplings in a NePS space. @@ -386,8 +434,10 @@ def _( domain_obj: Domain, context: SamplingResolutionContext, ) -> Any: - if context.was_already_resolved(domain_obj): - return context.get_resolved(domain_obj) + # Use path-aware caching to ensure different parameter positions + # with the same domain get sampled independently + if context.was_already_resolved_with_path(domain_obj): + return context.get_resolved_with_path(domain_obj) initial_attrs = domain_obj.get_attrs() final_attrs = {} @@ -410,7 +460,7 @@ def _( raise ValueError(f"Failed to sample from {resolved_domain_obj!r}.") from e result = self._resolve(sampled_value, "sampled_value", context) - context.add_resolved(domain_obj, result) + context.add_resolved_with_path(domain_obj, result) return result @_resolver_dispatch.register From eb985b0b60b1c7ec0e356ddcaf07866eb9fd61c7 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 14 Oct 2025 18:26:32 +0200 Subject: [PATCH 081/156] feat: Update best_config.txt with final cumulative metrics upon stopping criteria --- neps/runtime.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/neps/runtime.py b/neps/runtime.py index a13e3790c..2d8be24e6 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -495,7 +495,7 @@ def _requires_global_stopping_criterion(self) -> bool: or self.settings.max_evaluation_time_total_seconds is not None ) - def _get_next_trial(self) -> Trial | Literal["break"]: + def _get_next_trial(self) -> Trial | Literal["break"]: # noqa: PLR0915 # If there are no global stopping criterion, we can no just return early. 
with self.state._optimizer_lock.lock(worker_id=self.worker_id): # NOTE: It's important to release the trial lock before sampling @@ -512,8 +512,39 @@ def _get_next_trial(self) -> Trial | Literal["break"]: trials = self.state._trial_repo.latest() if self._requires_global_stopping_criterion: - should_stop = self._check_global_stopping_criterion(trials)[0] + should_stop, stop_dict = self._check_global_stopping_criterion(trials) if should_stop is not False: + # Update the best_config.txt to include the final cumulative + # metrics + main_dir = Path(self.state.path) + summary_dir = main_dir / "summary" + improvement_trace_path = ( + summary_dir / "best_config_trajectory.txt" + ) + best_config_path = summary_dir / "best_config.txt" + _trace_lock = FileLock(".trace.lock") + _trace_lock_path = Path(str(_trace_lock.lock_file)) + _trace_lock_path.touch(exist_ok=True) + + with _trace_lock: + trace_text, best_config_text = _build_trace_texts( + self.state.all_best_configs + ) + + # Add final cumulative metrics to the best config text + best_config_text += "\n" + best_config_text += "-" * 80 + best_config_text += ( + "\nFinal cumulative metrics (Assuming completed run):" + ) + for metric, value in stop_dict.items(): + best_config_text += f"\n{metric}: {value}" + + with improvement_trace_path.open(mode="w") as f: + f.write(trace_text) + + with best_config_path.open(mode="w") as f: + f.write(best_config_text) logger.info(should_stop) return "break" From 85916f49685c4cc64ce333419b3e3fb8fbce71ef Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 16 Oct 2025 01:08:10 +0200 Subject: [PATCH 082/156] feat: Update summary writing logic to ensure metrics are recorded only when required and improve DataFrame handling --- neps/api.py | 2 +- neps/optimizers/neps_bracket_optimizer.py | 4 +- neps/runtime.py | 76 +++++++++++------------ neps/status/status.py | 16 ++--- 4 files changed, 49 insertions(+), 49 deletions(-) diff --git a/neps/api.py b/neps/api.py index 4b7d4fefc..72f7483c1 100644 
--- a/neps/api.py +++ b/neps/api.py @@ -529,7 +529,7 @@ def __call__( post_run_csv(root_directory) root_directory = Path(root_directory) summary_dir = root_directory / "summary" - if not write_summary_to_disk: + if write_summary_to_disk: trajectory_of_improvements(root_directory) logger.info( "The summary folder has been created, which contains csv and txt files with" diff --git a/neps/optimizers/neps_bracket_optimizer.py b/neps/optimizers/neps_bracket_optimizer.py index e2d07f763..f48d17d1f 100644 --- a/neps/optimizers/neps_bracket_optimizer.py +++ b/neps/optimizers/neps_bracket_optimizer.py @@ -75,11 +75,11 @@ def __call__( # noqa: C901, PLR0912 case "highest_fidelity": # fid_max config = self._sample_prior(fidelity_level="max") rung = max(self.rung_to_fid) - return SampledConfig(id=f"1_{rung}", config=config) + return SampledConfig(id=f"1_rung_{rung}", config=config) case True: # fid_min config = self._sample_prior(fidelity_level="min") rung = min(self.rung_to_fid) - return SampledConfig(id=f"1_{rung}", config=config) + return SampledConfig(id=f"1__rung_{rung}", config=config) case False: pass diff --git a/neps/runtime.py b/neps/runtime.py index 2d8be24e6..564c8ac5f 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -495,7 +495,7 @@ def _requires_global_stopping_criterion(self) -> bool: or self.settings.max_evaluation_time_total_seconds is not None ) - def _get_next_trial(self) -> Trial | Literal["break"]: # noqa: PLR0915 + def _get_next_trial(self) -> Trial | Literal["break"]: # noqa: PLR0915, C901 # If there are no global stopping criterion, we can no just return early. 
with self.state._optimizer_lock.lock(worker_id=self.worker_id): # NOTE: It's important to release the trial lock before sampling @@ -514,37 +514,39 @@ def _get_next_trial(self) -> Trial | Literal["break"]: # noqa: PLR0915 if self._requires_global_stopping_criterion: should_stop, stop_dict = self._check_global_stopping_criterion(trials) if should_stop is not False: - # Update the best_config.txt to include the final cumulative - # metrics - main_dir = Path(self.state.path) - summary_dir = main_dir / "summary" - improvement_trace_path = ( - summary_dir / "best_config_trajectory.txt" - ) - best_config_path = summary_dir / "best_config.txt" _trace_lock = FileLock(".trace.lock") _trace_lock_path = Path(str(_trace_lock.lock_file)) _trace_lock_path.touch(exist_ok=True) - with _trace_lock: - trace_text, best_config_text = _build_trace_texts( - self.state.all_best_configs - ) - - # Add final cumulative metrics to the best config text - best_config_text += "\n" - best_config_text += "-" * 80 - best_config_text += ( - "\nFinal cumulative metrics (Assuming completed run):" + if self.settings.write_summary_to_disk: + # Update the best_config.txt to include the final cumulative + # metrics + main_dir = Path(self.state.path) + summary_dir = main_dir / "summary" + improvement_trace_path = ( + summary_dir / "best_config_trajectory.txt" ) - for metric, value in stop_dict.items(): - best_config_text += f"\n{metric}: {value}" - - with improvement_trace_path.open(mode="w") as f: - f.write(trace_text) - - with best_config_path.open(mode="w") as f: - f.write(best_config_text) + best_config_path = summary_dir / "best_config.txt" + + with _trace_lock: + trace_text, best_config_text = _build_trace_texts( + self.state.all_best_configs + ) + + # Add final cumulative metrics to the best config text + best_config_text += "\n" + best_config_text += "-" * 80 + best_config_text += ( + "\nFinal cumulative metrics (Assuming completed run):" + ) + for metric, value in stop_dict.items(): + 
best_config_text += f"\n{metric}: {value}" + + with improvement_trace_path.open(mode="w") as f: + f.write(trace_text) + + with best_config_path.open(mode="w") as f: + f.write(best_config_text) logger.info(should_stop) return "break" @@ -643,6 +645,15 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 _set_workers_neps_state(self.state) main_dir = Path(self.state.path) + summary_dir = main_dir / "summary" + summary_dir.mkdir(parents=True, exist_ok=True) + improvement_trace_path = summary_dir / "best_config_trajectory.txt" + improvement_trace_path.touch(exist_ok=True) + best_config_path = summary_dir / "best_config.txt" + best_config_path.touch(exist_ok=True) + _trace_lock = FileLock(".trace.lock") + _trace_lock_path = Path(str(_trace_lock.lock_file)) + _trace_lock_path.touch(exist_ok=True) if self.settings.write_summary_to_disk: full_df_path, short_path, csv_locker = _initiate_summary_csv(main_dir) @@ -652,17 +663,6 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 full_df_path.touch(exist_ok=True) short_path.touch(exist_ok=True) - summary_dir = main_dir / "summary" - summary_dir.mkdir(parents=True, exist_ok=True) - - improvement_trace_path = summary_dir / "best_config_trajectory.txt" - improvement_trace_path.touch(exist_ok=True) - best_config_path = summary_dir / "best_config.txt" - best_config_path.touch(exist_ok=True) - _trace_lock = FileLock(".trace.lock") - _trace_lock_path = Path(str(_trace_lock.lock_file)) - _trace_lock_path.touch(exist_ok=True) - logger.info( "Summary files can be found in the “summary” folder inside" "the root directory: %s", diff --git a/neps/status/status.py b/neps/status/status.py index 664458798..286ff9e47 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -74,12 +74,12 @@ def df(self) -> pd.DataFrame: metadata_df = pd.DataFrame.from_records( [asdict(t.metadata) for t in trials] ).convert_dtypes() - - return ( - pd.concat([config_df, extra_df, report_df, metadata_df], axis="columns") - .set_index("id") - 
.dropna(how="all", axis="columns") + combined_df = pd.concat( + [config_df, extra_df, report_df, metadata_df], axis="columns" ) + if combined_df.empty: + return combined_df + return combined_df.set_index("id").dropna(how="all", axis="columns") def completed(self) -> list[Trial]: """Return all trials which are in a completed state.""" @@ -347,10 +347,10 @@ def trajectory_of_improvements( df = summary.df() - if "time_sampled" not in df.columns: + if not df.empty and "time_sampled" not in df.columns: raise ValueError("Missing `time_sampled` column in summary DataFrame.") - - df = df.sort_values("time_sampled") + if not df.empty: + df = df.sort_values("time_sampled") all_best_configs = [] best_score = float("inf") From 86a377592500fb60b26d76f23b4a89368d783fb7 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 21 Oct 2025 22:31:12 +0200 Subject: [PATCH 083/156] feat: Refactor trajectory_of_improvements to utilize _build_trace_texts for consistent trace text generation --- neps/status/status.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/neps/status/status.py b/neps/status/status.py index 286ff9e47..6b479c400 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -12,6 +12,7 @@ import numpy as np import pandas as pd +from neps.runtime import _build_trace_texts from neps.space.neps_spaces import neps_space from neps.space.neps_spaces.neps_space import NepsCompatConverter from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler @@ -354,7 +355,6 @@ def trajectory_of_improvements( all_best_configs = [] best_score = float("inf") - trace_text = "" for trial_id, row in df.iterrows(): if "objective_to_minimize" not in row or pd.isna(row["objective_to_minimize"]): @@ -376,11 +376,8 @@ def trajectory_of_improvements( } all_best_configs.append(best) - trace_text += ( - f"Objective to minimize: {best['score']}\n" - f"Config ID: {best['trial_id']}\n" - f"Config: {best['config']}\n" + "-" * 80 + "\n" - ) + # Use the 
_build_trace_texts function to generate consistent trace text + trace_text, best_config_text = _build_trace_texts(all_best_configs) summary_dir = root_directory / "summary" summary_dir.mkdir(parents=True, exist_ok=True) @@ -389,14 +386,9 @@ def trajectory_of_improvements( f.write(trace_text) if all_best_configs: - final_best = all_best_configs[-1] best_path = summary_dir / "best_config.txt" with best_path.open("w") as f: - f.write( - f"Objective to minimize: {final_best['score']}\n" - f"Config ID: {final_best['trial_id']}\n" - f"Config: {final_best['config']}\n" - ) + f.write(best_config_text) return all_best_configs From 69d6eb55e93cf53baa2dab4a8cb5b61616f13aac Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 21 Oct 2025 22:59:58 +0200 Subject: [PATCH 084/156] feat: Move _build_trace_texts function to status.py and update imports for better organization --- neps/api.py | 3 +- neps/runtime.py | 79 +------------------------------------------ neps/status/status.py | 78 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 79 insertions(+), 81 deletions(-) diff --git a/neps/api.py b/neps/api.py index 72f7483c1..d2a6e8f17 100644 --- a/neps/api.py +++ b/neps/api.py @@ -29,7 +29,7 @@ from neps.state import NePSState, OptimizationState, SeedSnapshot from neps.state.neps_state import TrialRepo from neps.state.pipeline_eval import EvaluatePipelineReturn -from neps.status.status import post_run_csv, trajectory_of_improvements +from neps.status.status import post_run_csv from neps.utils.common import dynamic_load_object if TYPE_CHECKING: @@ -530,7 +530,6 @@ def __call__( root_directory = Path(root_directory) summary_dir = root_directory / "summary" if write_summary_to_disk: - trajectory_of_improvements(root_directory) logger.info( "The summary folder has been created, which contains csv and txt files with" "the output of all data in the run (short.csv - only the best; full.csv - " diff --git a/neps/runtime.py b/neps/runtime.py index 564c8ac5f..dea5c822a 100644 --- 
a/neps/runtime.py +++ b/neps/runtime.py @@ -49,7 +49,7 @@ WorkerSettings, evaluate_trial, ) -from neps.status.status import _initiate_summary_csv, status +from neps.status.status import _build_trace_texts, _initiate_summary_csv, status from neps.utils.common import gc_disabled if TYPE_CHECKING: @@ -59,83 +59,6 @@ logger = logging.getLogger(__name__) -def _build_trace_texts(best_configs: list[dict]) -> tuple[str, str]: - """Build trace text and best config text from a list of best configurations. - - Args: - best_configs: List of best configuration dictionaries containing - 'trial_id', 'score', 'config', and optional metrics. - - Returns: - Tuple of (trace_text, best_config_text) strings. - """ - trace_text = ( - "Best configs and their objectives across evaluations:\n" + "-" * 80 + "\n" - ) - - for best in best_configs: - trace_text += ( - f"Config ID: {best['trial_id']}\nObjective to minimize: {best['score']}\n" - + (f"Cost: {best.get('cost', 0)}\n" if "cost" in best else "") - + ( - f"Cumulative evaluations: {best.get('cumulative_evaluations', 0)}\n" - if "cumulative_evaluations" in best - else "" - ) - + ( - f"Cumulative fidelities: {best.get('cumulative_fidelities', 0)}\n" - if "cumulative_fidelities" in best - else "" - ) - + ( - f"Cumulative cost: {best.get('cumulative_cost', 0)}\n" - if "cumulative_cost" in best - else "" - ) - + ( - f"Cumulative time: {best.get('cumulative_time', 0)}\n" - if "cumulative_time" in best - else "" - ) - + f"Config: {best['config']}\n" - + "-" * 80 - + "\n" - ) - - best_config_text = "" - if best_configs: - best_config = best_configs[-1] # Latest best - best_config_text = ( - "# Best config:" - f"\n\n Config ID: {best_config['trial_id']}" - f"\n Objective to minimize: {best_config['score']}" - + (f"\n Cost: {best_config['cost']}" if "cost" in best_config else "") - + ( - f"\n Cumulative evaluations: {best_config['cumulative_evaluations']}" - if "cumulative_evaluations" in best_config - else "" - ) - + ( - f"\n Cumulative 
fidelities: {best_config['cumulative_fidelities']}" - if "cumulative_fidelities" in best_config - else "" - ) - + ( - f"\n Cumulative cost: {best_config['cumulative_cost']}" - if "cumulative_cost" in best_config - else "" - ) - + ( - f"\n Cumulative time: {best_config['cumulative_time']}" - if "cumulative_time" in best_config - else "" - ) - + f"\n Config: {best_config['config']}" - ) - - return trace_text, best_config_text - - _DDP_ENV_VAR_NAME = "NEPS_DDP_TRIAL_ID" diff --git a/neps/status/status.py b/neps/status/status.py index 6b479c400..16ac32f13 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -12,7 +12,6 @@ import numpy as np import pandas as pd -from neps.runtime import _build_trace_texts from neps.space.neps_spaces import neps_space from neps.space.neps_spaces.neps_space import NepsCompatConverter from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler @@ -23,6 +22,83 @@ from neps.space.neps_spaces.parameters import PipelineSpace +def _build_trace_texts(best_configs: list[dict]) -> tuple[str, str]: + """Build trace text and best config text from a list of best configurations. + + Args: + best_configs: List of best configuration dictionaries containing + 'trial_id', 'score', 'config', and optional metrics. + + Returns: + Tuple of (trace_text, best_config_text) strings. 
+ """ + trace_text = ( + "Best configs and their objectives across evaluations:\n" + "-" * 80 + "\n" + ) + + for best in best_configs: + trace_text += ( + f"Config ID: {best['trial_id']}\nObjective to minimize: {best['score']}\n" + + (f"Cost: {best.get('cost', 0)}\n" if "cost" in best else "") + + ( + f"Cumulative evaluations: {best.get('cumulative_evaluations', 0)}\n" + if "cumulative_evaluations" in best + else "" + ) + + ( + f"Cumulative fidelities: {best.get('cumulative_fidelities', 0)}\n" + if "cumulative_fidelities" in best + else "" + ) + + ( + f"Cumulative cost: {best.get('cumulative_cost', 0)}\n" + if "cumulative_cost" in best + else "" + ) + + ( + f"Cumulative time: {best.get('cumulative_time', 0)}\n" + if "cumulative_time" in best + else "" + ) + + f"Config: {best['config']}\n" + + "-" * 80 + + "\n" + ) + + best_config_text = "" + if best_configs: + best_config = best_configs[-1] # Latest best + best_config_text = ( + "# Best config:" + f"\n\n Config ID: {best_config['trial_id']}" + f"\n Objective to minimize: {best_config['score']}" + + (f"\n Cost: {best_config['cost']}" if "cost" in best_config else "") + + ( + f"\n Cumulative evaluations: {best_config['cumulative_evaluations']}" + if "cumulative_evaluations" in best_config + else "" + ) + + ( + f"\n Cumulative fidelities: {best_config['cumulative_fidelities']}" + if "cumulative_fidelities" in best_config + else "" + ) + + ( + f"\n Cumulative cost: {best_config['cumulative_cost']}" + if "cumulative_cost" in best_config + else "" + ) + + ( + f"\n Cumulative time: {best_config['cumulative_time']}" + if "cumulative_time" in best_config + else "" + ) + + f"\n Config: {best_config['config']}" + ) + + return trace_text, best_config_text + + @dataclass class Summary: """Summary of the current state of a neps run.""" From 34af3aa7a6444bc8a839ebc433e2a72fe1697030 Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 22 Oct 2025 21:31:43 +0200 Subject: [PATCH 085/156] feat: Remove trajectory_of_improvements 
function --- neps/status/status.py | 64 ------------------------------------------- 1 file changed, 64 deletions(-) diff --git a/neps/status/status.py b/neps/status/status.py index 16ac32f13..af427d3f3 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -405,70 +405,6 @@ def status( return df, short -def trajectory_of_improvements( - root_directory: str | Path, -) -> list[dict]: - """Track and write the trajectory of improving configurations over time. - - Args: - root_directory: The root directory given to neps.run. - - Returns: - List of dicts with improving scores and their configurations. - """ - root_directory = Path(root_directory) - summary = Summary.from_directory(root_directory) - - if summary.is_multiobjective: - return [] - - df = summary.df() - - if not df.empty and "time_sampled" not in df.columns: - raise ValueError("Missing `time_sampled` column in summary DataFrame.") - if not df.empty: - df = df.sort_values("time_sampled") - - all_best_configs = [] - best_score = float("inf") - - for trial_id, row in df.iterrows(): - if "objective_to_minimize" not in row or pd.isna(row["objective_to_minimize"]): - continue - - score = row["objective_to_minimize"] - if score < best_score: - best_score = score - config = { - k.replace("config.", ""): v - for k, v in row.items() - if k.startswith("config.") - } - - best = { - "score": score, - "trial_id": trial_id, - "config": config, - } - all_best_configs.append(best) - - # Use the _build_trace_texts function to generate consistent trace text - trace_text, best_config_text = _build_trace_texts(all_best_configs) - - summary_dir = root_directory / "summary" - summary_dir.mkdir(parents=True, exist_ok=True) - output_path = summary_dir / "best_config_trajectory.txt" - with output_path.open("w") as f: - f.write(trace_text) - - if all_best_configs: - best_path = summary_dir / "best_config.txt" - with best_path.open("w") as f: - f.write(best_config_text) - - return all_best_configs - - def 
_initiate_summary_csv(root_directory: str | Path) -> tuple[Path, Path, FileLocker]: """Initializes a summary CSV and an associated locker for file access control. From 740065ac6a49f43e5e59cbe0f89ed8b0ac52ff0b Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 23 Oct 2025 03:38:31 +0200 Subject: [PATCH 086/156] feat: Fix resampling by seperating __eq and is_equivalent. Improved caching --- neps/api.py | 4 +- neps/optimizers/algorithms.py | 2 +- neps/runtime.py | 8 +- neps/space/neps_spaces/neps_space.py | 119 ++++++++++++---- neps/space/neps_spaces/parameters.py | 201 ++++++++++++++++++++------- tests/test_state/test_neps_state.py | 2 + 6 files changed, 253 insertions(+), 83 deletions(-) diff --git a/neps/api.py b/neps/api.py index d2a6e8f17..1e8dfecae 100644 --- a/neps/api.py +++ b/neps/api.py @@ -448,7 +448,7 @@ def __call__( if isinstance(space, PipelineSpace) and neps_classic_space_compatibility == "classic": raise ValueError( "The provided optimizer is not compatible with this complex search space. " - "Please use one that is, such as 'random_search', " + "Please use one that is, such as 'random_search', 'hyperband'" "'priorband', or 'complex_random_search'." ) @@ -471,8 +471,6 @@ def __call__( "mo_hyperband", "primo", "neps_priorband", - "neps_random_search", - "complex_random_search", "neps_bracket_optimizer", "neps_hyperband", } diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 5eee5e692..1509e2b9b 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -1658,7 +1658,7 @@ def _neps_bracket_optimizer( if len(fidelity_attrs.items()) != 1: raise ValueError( - "Only one fidelity should be defined in the pipeline space." + "Exactly one fidelity should be defined in the pipeline space." 
f"\nGot: {fidelity_attrs!r}" ) diff --git a/neps/runtime.py b/neps/runtime.py index dea5c822a..a5dd12282 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -815,7 +815,7 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 "Learning Curve %s: %s", evaluated_trial.id, report.learning_curve ) - def load_incumbent_trace( # noqa: C901 + def load_incumbent_trace( # noqa: C901, PLR0912 self, previous_trials: dict[str, Trial], _trace_lock: FileLock, @@ -872,9 +872,11 @@ def load_incumbent_trace( # noqa: C901 ) if hasattr(optimizer, "space"): + fidelity_name = "" if not isinstance(optimizer.space, PipelineSpace): - fidelity_name = next(iter(optimizer.space.fidelities.keys())) - else: + if optimizer.space.fidelities: + fidelity_name = next(iter(optimizer.space.fidelities.keys())) + elif optimizer.space.fidelity_attrs: fidelity_name = next(iter(optimizer.space.fidelity_attrs.keys())) fidelity_name = ( f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index f0e16eb61..e25905042 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -116,6 +116,10 @@ def __init__( # `_resolved_objects` stores the intermediate values to make re-use possible. self._resolved_objects: dict[Any, Any] = {} + # `_sampled_domains` tracks domain objects that have been sampled from by + # (id, path) key + self._sampled_domains: set[tuple[int, str]] = set() + # `_current_path_parts` stores the current path we are resolving. self._current_path_parts: list[str] = [] @@ -181,7 +185,12 @@ def was_already_resolved(self, obj: Any) -> bool: Returns: True if the object was already resolved, False otherwise. 
""" - return obj in self._resolved_objects + try: + # Try to use the object itself as a key (works for hashable objects) + return obj in self._resolved_objects + except TypeError: + # If the object is not hashable, fall back to using its id + return id(obj) in self._resolved_objects def add_resolved(self, original: Any, resolved: Any) -> None: """Add a resolved object to the context. @@ -205,7 +214,12 @@ def add_resolved(self, original: Any, resolved: Any) -> None: raise ValueError( f"Attempting to add a Resampled object to resolved values: {original!r}." ) - self._resolved_objects[original] = resolved + try: + # Try to use the object itself as a key (works for hashable objects) + self._resolved_objects[original] = resolved + except TypeError: + # If the object is not hashable, fall back to using its id + self._resolved_objects[id(original)] = resolved def get_resolved(self, obj: Any) -> Any: """Get the resolved value for the given object. @@ -220,11 +234,23 @@ def get_resolved(self, obj: Any) -> Any: ValueError: If the object was not already resolved in the context. """ try: + # Try to use the object itself as a key (works for hashable objects) return self._resolved_objects[obj] - except KeyError as err: - raise ValueError( - f"Given object was not already resolved. Please check first: {obj!r}" - ) from err + except (KeyError, TypeError) as err: + if isinstance(err, TypeError): + # If the object is not hashable, try using its id + try: + return self._resolved_objects[id(obj)] + except KeyError as id_err: + raise ValueError( + "Given object was not already resolved. Please check first:" + f" {obj!r}" + ) from id_err + else: + # KeyError - object wasn't found + raise ValueError( + f"Given object was not already resolved. Please check first: {obj!r}" + ) from err def sample_from(self, domain_obj: Domain) -> Any: """Sample a value from the given domain object. 
@@ -239,18 +265,6 @@ def sample_from(self, domain_obj: Domain) -> Any: ValueError: If the domain object was already resolved or if the path has already been sampled from. """ - # Each `domain_obj` is only ever sampled from once. - # This is okay and the expected behavior. - # For each `domain_obj`, its sampled value is either directly stored itself, - # or is used in some other Resolvable. - # In both cases that sampled value is cached for later uses, - # and so the `domain_obj` will not be re-sampled from again. - if self.was_already_resolved(domain_obj): - raise ValueError( - "We have already sampled a value for the given domain object:" - f" {domain_obj!r}." + "\nThis should not be happening." - ) - # The range compatibility identifier is there to make sure when we say # the path matches, that the range for the value we are looking up also matches. domain_obj_type_name = type(domain_obj).__name__.lower() @@ -270,12 +284,25 @@ def sample_from(self, domain_obj: Domain) -> Any: + "\nThis should not be happening." ) + # For domain object tracking, create a key that includes both domain ID and path + # context + # This allows the same domain to be sampled in different resampled contexts + domain_path_key = (id(domain_obj), current_path) + + if domain_path_key in self._sampled_domains: + raise ValueError( + "We have already sampled a value for the given domain object:" + f" {domain_obj!r} at path {current_path!r}." + + "\nThis should not be happening." 
+ ) + sampled_value = self._domain_sampler( domain_obj=domain_obj, current_path=current_path, ) self._samplings_made[current_path] = sampled_value + self._sampled_domains.add(domain_path_key) return self._samplings_made[current_path] def get_value_from_environment(self, var_name: str) -> Any: @@ -434,9 +461,18 @@ def _( domain_obj: Domain, context: SamplingResolutionContext, ) -> Any: - # Use path-aware caching to ensure different parameter positions - # with the same domain get sampled independently - if context.was_already_resolved_with_path(domain_obj): + # Check if we're in a resampled context (path contains "resampled_") + current_path = ".".join(context._current_path_parts) + is_resampled_context = "resampled_" in current_path + + # Always check object-identity cache first for shared objects + if context.was_already_resolved(domain_obj): + return context.get_resolved(domain_obj) + + # In non-resampled contexts, also check path-specific cache + if not is_resampled_context and context.was_already_resolved_with_path( + domain_obj + ): return context.get_resolved_with_path(domain_obj) initial_attrs = domain_obj.get_attrs() @@ -460,7 +496,16 @@ def _( raise ValueError(f"Failed to sample from {resolved_domain_obj!r}.") from e result = self._resolve(sampled_value, "sampled_value", context) - context.add_resolved_with_path(domain_obj, result) + # Cache the result + if not is_resampled_context: + # In normal contexts, cache with both object identity and path + context.add_resolved(domain_obj, result) + context.add_resolved_with_path(domain_obj, result) + elif not context.was_already_resolved(domain_obj): + # In resampled contexts, cache shared objects for reuse across contexts + # but skip path-based caching to prevent unwanted dependencies + context.add_resolved(domain_obj, result) + return result @_resolver_dispatch.register @@ -884,7 +929,9 @@ def convert_operation_to_callable(operation: Operation) -> Callable: operator = cast(Callable, operation.operator) 
operation_args: list[Any] = [] - for arg in operation.args: + for arg in ( + operation.args if isinstance(operation.args, tuple | list) else (operation.args,) + ): if isinstance(arg, tuple | list): arg_sequence: list[Any] = [] for a in arg: @@ -920,12 +967,11 @@ def convert_operation_to_callable(operation: Operation) -> Callable: if isinstance(kwarg_value, Operation) else kwarg_value ) - return cast(Callable, operator(*operation_args, **operation_kwargs)) def _operation_to_unwrapped_config( - operation: Operation | str, + operation: Operation | str | Resolvable, level: int = 1, ) -> list[config_string.UnwrappedConfigStringPart]: result = [] @@ -941,9 +987,28 @@ def _operation_to_unwrapped_config( operands="", ) result.append(item) - for operand in operation.args: - result.extend(_operation_to_unwrapped_config(operand, level + 1)) + + # Handle args that might be a Resolvable or an iterable + args = operation.args + if isinstance(args, Resolvable): + # If args is a Resolvable, treat it as a single operand + result.extend(_operation_to_unwrapped_config(args, level + 1)) + else: + # If args is iterable (tuple/list), iterate over each operand + for operand in args: + result.extend(_operation_to_unwrapped_config(operand, level + 1)) + elif isinstance(operation, Resolvable): + # Handle other Resolvable types that are not Operations + item = config_string.UnwrappedConfigStringPart( + level=level, + opening_index=-1, + operator=str(operation), # Convert to string for display + hyperparameters="", + operands="", + ) + result.append(item) else: + # Handle string operations item = config_string.UnwrappedConfigStringPart( level=level, opening_index=-1, diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 14de28dbe..2a98cf24c 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -24,6 +24,29 @@ def __repr__(self) -> str: _UNSET = _Unset() +def _parameters_are_equivalent(param1: Any, param2: Any) 
-> bool: + """Check if two parameters are equivalent using their is_equivalent_to method. + + This helper function provides a safe way to compare parameters without + interfering with Python's object identity system. Falls back to regular + equality comparison for objects that don't have the is_equivalent_to method. + + Args: + param1: First parameter to compare. + param2: Second parameter to compare. + + Returns: + True if the parameters are equivalent, False otherwise. + """ + # Try to use the is_equivalent_to method if available + if hasattr(param1, "is_equivalent_to"): + return param1.is_equivalent_to(param2) + if hasattr(param2, "is_equivalent_to"): + return param2.is_equivalent_to(param1) + # Fall back to regular equality for other types + return param1 == param2 + + @runtime_checkable class Resolvable(Protocol): """A protocol for objects that can be resolved into attributes.""" @@ -87,14 +110,33 @@ def __str__(self) -> str: """Get a string representation of the fidelity.""" return f"Fidelity({self._domain.__str__()})" - def __eq__(self, other: Fidelity | object) -> bool: + def is_equivalent_to(self, other: object) -> bool: + """Check if this fidelity parameter is equivalent to another. + + This method provides comparison logic without interfering with Python's + object identity system (unlike __eq__). Use this for functional comparisons + like checking if parameters have the same configuration. + + Args: + other: The object to compare with. + + Returns: + True if the objects are equivalent, False otherwise. + """ if not isinstance(other, Fidelity): return False return self._domain == other._domain + def __eq__(self, other: object) -> bool: + """Check if this is the exact same object instance. + + This uses object identity to avoid interfering with the resolution caching system. 
+ """ + return self is other + def __hash__(self) -> int: - """Get the hash of the fidelity based on its domain.""" - return hash(self._domain) + """Get hash based on object identity.""" + return id(self) @property def min_value(self) -> int | float: @@ -268,7 +310,7 @@ class NewSpace(PipelineSpace): new_pipeline = NewSpace() for exist_name, value in self.get_attrs().items(): setattr(new_pipeline, exist_name, value) - if exist_name == param_name and not value == other: + if exist_name == param_name and not _parameters_are_equivalent(value, other): raise ValueError( f"A different parameter with the name {param_name!r} already exists" " in the pipeline:\n" @@ -637,29 +679,37 @@ def __str__(self) -> str: string += ")" return string - def __eq__(self, other: Categorical | object) -> bool: + def is_equivalent_to(self, other: object) -> bool: + """Check if this categorical parameter is equivalent to another. + + This method provides comparison logic without interfering with Python's + object identity system (unlike __eq__). Use this for functional comparisons + like checking if parameters have the same configuration. + + Args: + other: The object to compare with. + + Returns: + True if the objects are equivalent, False otherwise. + """ if not isinstance(other, Categorical): return False return ( - self.prior == other.prior - and self.prior_confidence == other.prior_confidence + self._prior == other._prior + and self._prior_confidence == other._prior_confidence and self.choices == other.choices ) + def __eq__(self, other: object) -> bool: + """Check if this is the exact same object instance. + + This uses object identity to avoid interfering with the resolution caching system. 
+ """ + return self is other + def __hash__(self) -> int: - """Get the hash of the categorical domain based on its attributes.""" - try: - choices_hash = hash(self.choices) - except TypeError: - # If choices are not hashable (e.g., contain mutable objects), use id - choices_hash = id(self.choices) - return hash( - ( - self._prior if self._prior is not _UNSET else None, - self._prior_confidence if self._prior_confidence is not _UNSET else None, - choices_hash, - ) - ) + """Get hash based on object identity.""" + return id(self) @property def min_value(self) -> int: @@ -847,7 +897,19 @@ def __str__(self) -> str: string += ")" return string - def __eq__(self, other: Float | object) -> bool: + def is_equivalent_to(self, other: object) -> bool: + """Check if this float parameter is equivalent to another. + + This method provides comparison logic without interfering with Python's + object identity system (unlike __eq__). Use this for functional comparisons + like checking if parameters have the same configuration. + + Args: + other: The object to compare with. + + Returns: + True if the objects are equivalent, False otherwise. + """ if not isinstance(other, Float): return False return ( @@ -858,17 +920,16 @@ def __eq__(self, other: Float | object) -> bool: and self._log == other._log ) + def __eq__(self, other: object) -> bool: + """Check if this is the exact same object instance. + + This uses object identity to avoid interfering with the resolution caching system. 
+ """ + return self is other + def __hash__(self) -> int: - """Get the hash of the float domain based on its attributes.""" - return hash( - ( - self._prior if self._prior is not _UNSET else None, - self._prior_confidence if self._prior_confidence is not _UNSET else None, - self.min_value, - self.max_value, - self._log, - ) - ) + """Get hash based on object identity.""" + return id(self) @property def min_value(self) -> float: @@ -1054,7 +1115,19 @@ def __str__(self) -> str: string += ")" return string - def __eq__(self, other: Integer | object) -> bool: + def is_equivalent_to(self, other: object) -> bool: + """Check if this integer parameter is equivalent to another. + + This method provides comparison logic without interfering with Python's + object identity system (unlike __eq__). Use this for functional comparisons + like checking if parameters have the same configuration. + + Args: + other: The object to compare with. + + Returns: + True if the objects are equivalent, False otherwise. + """ if not isinstance(other, Integer): return False return ( @@ -1065,17 +1138,16 @@ def __eq__(self, other: Integer | object) -> bool: and self._log == other._log ) + def __eq__(self, other: object) -> bool: + """Check if this is the exact same object instance. + + This uses object identity to avoid interfering with the resolution caching system. 
+ """ + return self is other + def __hash__(self) -> int: - """Get the hash of the integer domain based on its attributes.""" - return hash( - ( - self._prior if self._prior is not _UNSET else None, - self._prior_confidence if self._prior_confidence is not _UNSET else None, - self.min_value, - self.max_value, - self._log, - ) - ) + """Get hash based on object identity.""" + return id(self) @property def min_value(self) -> int: @@ -1253,7 +1325,19 @@ def __str__(self) -> str: f" kwargs={self._kwargs!s})" ) - def __eq__(self, other: Operation | object) -> bool: + def is_equivalent_to(self, other: object) -> bool: + """Check if this operation parameter is equivalent to another. + + This method provides comparison logic without interfering with Python's + object identity system (unlike __eq__). Use this for functional comparisons + like checking if parameters have the same configuration. + + Args: + other: The object to compare with. + + Returns: + True if the objects are equivalent, False otherwise. + """ if not isinstance(other, Operation): return False return ( @@ -1262,6 +1346,17 @@ def __eq__(self, other: Operation | object) -> bool: and self.kwargs == other.kwargs ) + def __eq__(self, other: object) -> bool: + """Check if this is the exact same object instance. + + This uses object identity to avoid interfering with the resolution caching system. + """ + return self is other + + def __hash__(self) -> int: + """Get hash based on object identity.""" + return id(self) + @property def operator(self) -> Callable | str: """Get the operator of the operation. @@ -1283,10 +1378,14 @@ def args(self) -> tuple[Any, ...]: A tuple of arguments to be passed to the operator. Raises: - ValueError: If the args are not a tuple or Resolvable. - + ValueError: If the args are not resolved to a tuple. """ - return cast(tuple[Any, ...], self._args) + if isinstance(self._args, Resolvable): + raise ValueError( + f"Operation args contain unresolved Resolvable: {self._args!r}. 
" + "The operation needs to be resolved before accessing args as a tuple." + ) + return self._args @property def kwargs(self) -> Mapping[str, Any]: @@ -1296,10 +1395,14 @@ def kwargs(self) -> Mapping[str, Any]: A mapping of keyword arguments to be passed to the operator. Raises: - ValueError: If the kwargs are not a mapping or Resolvable. - + ValueError: If the kwargs are not resolved to a mapping. """ - return cast(Mapping[str, Any], self._kwargs) + if isinstance(self._kwargs, Resolvable): + raise ValueError( + f"Operation kwargs contain unresolved Resolvable: {self._kwargs!r}. " + "The operation needs to be resolved before accessing kwargs as a mapping." + ) + return self._kwargs def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the operation as a mapping. diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index d977f321c..cb59800a3 100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -97,6 +97,7 @@ class SpaceFidPrior(PipelineSpace): "moasha", "mo_hyperband", "neps_priorband", + "neps_hyperband", ] NO_DEFAULT_FIDELITY_SUPPORT = [ "random_search", @@ -138,6 +139,7 @@ class SpaceFidPrior(PipelineSpace): "neps_priorband", "neps_random_search", "complex_random_search", + "neps_hyperband", ] From a7308162dc343944ee39e1a02b28b1ac21e3ae45 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 23 Oct 2025 04:58:22 +0200 Subject: [PATCH 087/156] Add comprehensive tests for NePS space conversion, compatibility, and metrics functionality - Introduced tests for converting between classic SearchSpace and NePS PipelineSpace, ensuring attributes and properties are preserved. - Implemented tests for algorithm compatibility with both NePS-only and classic algorithms. - Added tests for trajectory and metrics functionality, including handling of fidelity parameters and cumulative metrics tracking. - Included error handling tests for failed evaluations and validation of metric values. 
- Verified file format and structure for trajectory and best config outputs. - Ensured that NePS can revisit previous runs and update trajectories correctly. --- neps/space/neps_spaces/neps_space.py | 12 + .../test_basic_functionality.py | 163 ++++++ .../test_neps_space/test_neps_integration.py | 243 ++++++++- ...st_neps_integration_priorband__max_cost.py | 4 +- ...t_neps_integration_priorband__max_evals.py | 4 +- .../test_pipeline_space_methods.py | 396 ++++++++++++++ .../test_search_space__nos_like.py | 2 +- ...test_space_conversion_and_compatibility.py | 495 +++++++++++++++++ .../test_trajectory_and_metrics.py | 505 ++++++++++++++++++ 9 files changed, 1817 insertions(+), 7 deletions(-) create mode 100644 tests/test_neps_space/test_basic_functionality.py create mode 100644 tests/test_neps_space/test_pipeline_space_methods.py create mode 100644 tests/test_neps_space/test_space_conversion_and_compatibility.py create mode 100644 tests/test_neps_space/test_trajectory_and_metrics.py diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index e25905042..ebee6c7cb 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -1252,6 +1252,7 @@ def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | classic_space[key] = neps.HPOInteger( lower=value.min_value, upper=value.max_value, + log=value._log if hasattr(value, "_log") else False, prior=value.prior if value.has_prior else None, prior_confidence=( value.prior_confidence.value if value.has_prior else "low" @@ -1261,6 +1262,7 @@ def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | classic_space[key] = neps.HPOFloat( lower=value.min_value, upper=value.max_value, + log=value._log if hasattr(value, "_log") else False, prior=value.prior if value.has_prior else None, prior_confidence=( value.prior_confidence.value if value.has_prior else "low" @@ -1271,12 +1273,22 @@ def 
convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | classic_space[key] = neps.HPOInteger( lower=value._domain.min_value, upper=value._domain.max_value, + log=( + value._domain._log + if hasattr(value._domain, "_log") + else False + ), is_fidelity=True, ) elif isinstance(value._domain, Float): classic_space[key] = neps.HPOFloat( lower=value._domain.min_value, upper=value._domain.max_value, + log=( + value._domain._log + if hasattr(value._domain, "_log") + else False + ), is_fidelity=True, ) else: diff --git a/tests/test_neps_space/test_basic_functionality.py b/tests/test_neps_space/test_basic_functionality.py new file mode 100644 index 000000000..c8c2d5f93 --- /dev/null +++ b/tests/test_neps_space/test_basic_functionality.py @@ -0,0 +1,163 @@ +"""Simplified tests for basic NePS functionality.""" + +from __future__ import annotations + +import tempfile +from pathlib import Path + +import pytest + +import neps +from neps.optimizers import algorithms +from neps.space.neps_spaces.parameters import ( + Float, + Integer, + PipelineSpace, +) + + +class SimpleSpace(PipelineSpace): + """Simple space for testing.""" + + x = Float(min_value=0.0, max_value=1.0) + y = Integer(min_value=1, max_value=10) + + +def simple_evaluation(x: float, y: int) -> float: + """Simple evaluation function.""" + return x + y + + +def test_basic_neps_run(): + """Test that basic NePS run functionality works.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "basic_test" + + # Run optimization + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Check that optimization ran and created some files + assert root_directory.exists() + + # Should have created some evaluation files + files = list(root_directory.rglob("*")) + assert len(files) > 0, "Should have created 
some files" + + +def test_neps_optimization_with_dict_return(): + """Test NePS optimization with evaluation function returning dict.""" + + def dict_evaluation(x: float, y: int) -> dict: + return { + "objective_to_minimize": x + y, + "additional_metric": x * y, + } + + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "dict_test" + + # Run optimization + neps.run( + evaluate_pipeline=dict_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Check that optimization completed + assert root_directory.exists() + + +def test_different_neps_optimizers(): + """Test that different NePS optimizers work.""" + optimizers_to_test = [ + algorithms.neps_random_search, + algorithms.complex_random_search, + ] + + for optimizer in optimizers_to_test: + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / f"optimizer_{optimizer.__name__}" + + # Run optimization + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=optimizer, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Check that optimization completed + assert root_directory.exists() + + +def test_neps_status_functionality(): + """Test that neps.status works after optimization.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "status_test" + + # Run optimization + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=5, + overwrite_root_directory=True, + ) + + # Test status functionality (should not raise an error) + try: + neps.status(str(root_directory)) + except (FileNotFoundError, ValueError, KeyError) as e: + pytest.fail(f"neps.status should work after 
optimization: {e}") + + +def test_evaluation_results_are_recorded(): + """Test that evaluation results are properly recorded.""" + # Track evaluations + evaluations_called = [] + + def tracking_evaluation(x: float, y: int) -> float: + result = x + y + evaluations_called.append((x, y, result)) + return result + + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "tracking_test" + + # Run optimization + neps.run( + evaluate_pipeline=tracking_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Check that evaluations were called + assert len(evaluations_called) == 3, ( + f"Expected 3 evaluations, got {len(evaluations_called)}" + ) + + # Check that all results are reasonable + for x, y, result in evaluations_called: + assert 0.0 <= x <= 1.0, f"x should be in [0,1], got {x}" + assert 1 <= y <= 10, f"y should be in [1,10], got {y}" + assert result == x + y, f"Result should be x+y, got {result} != {x}+{y}" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index 752143257..adc4cb55a 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -8,6 +8,11 @@ import neps import neps.optimizers from neps.optimizers import algorithms +from neps.space.neps_spaces.neps_space import ( + check_neps_space_compatibility, + convert_classic_to_neps_search_space, + convert_neps_to_classic_search_space, +) from neps.space.neps_spaces.parameters import ( Categorical, ConfidenceLevel, @@ -324,8 +329,8 @@ class DemoOperationSpace(PipelineSpace): @pytest.mark.parametrize( "optimizer", [ - neps.optimizers.algorithms.neps_random_search, - neps.optimizers.algorithms.complex_random_search, + algorithms.neps_random_search, + 
algorithms.complex_random_search, ], ) def test_operation_demo(optimizer): @@ -343,3 +348,237 @@ def test_operation_demo(optimizer): overwrite_root_directory=True, ) neps.status(root_directory, print_summary=True) + + +# ===== Extended tests for newer NePS features ===== + + +# Test neps_hyperband with various PipelineSpaces +@pytest.mark.parametrize( + "optimizer", + [ + algorithms.neps_hyperband, + ], +) +def test_neps_hyperband_with_fidelity_demo(optimizer): + """Test neps_hyperband with a fidelity space.""" + pipeline_space = DemoHyperparameterWithFidelitySpace() + root_directory = f"/tests_tmpdir/test_neps_spaces/results/neps_hyperband_fidelity_demo__{optimizer.__name__}" + + neps.run( + evaluate_pipeline=hyperparameter_pipeline_to_optimize, + pipeline_space=pipeline_space, + optimizer=optimizer, + root_directory=root_directory, + fidelities_to_spend=15, # Use fidelities_to_spend for multi-fidelity optimizers + overwrite_root_directory=True, + ) + neps.status(root_directory, print_summary=True) + + +# Test PipelineSpace dynamic methods (add, remove, add_prior) +def test_pipeline_space_dynamic_methods(): + """Test PipelineSpace add, remove, and add_prior methods.""" + + # Create a basic space + class BasicSpace(PipelineSpace): + x = Float(min_value=0.0, max_value=1.0) + y = Integer(min_value=1, max_value=10) + + space = BasicSpace() + + # Test adding a new parameter + new_param = Categorical(choices=(True, False)) + space = space.add(new_param, "flag") + + # Verify the parameter was added + attrs = space.get_attrs() + assert "flag" in attrs + assert attrs["flag"] is new_param + + # Test adding a prior to an existing parameter + space = space.add_prior("x", prior=0.5, prior_confidence=ConfidenceLevel.HIGH) + + # Verify the prior was added + updated_attrs = space.get_attrs() + x_param = updated_attrs["x"] + assert x_param.has_prior + assert x_param.prior == 0.5 + assert x_param.prior_confidence == ConfidenceLevel.HIGH + + # Test removing a parameter + space = 
space.remove("y") + + # Verify the parameter was removed + final_attrs = space.get_attrs() + assert "y" not in final_attrs + assert "x" in final_attrs + assert "flag" in final_attrs + + +# Test space conversion functions +def test_space_conversion_functions(): + """Test conversion between classic and NePS spaces.""" + # Create a classic SearchSpace + classic_space = neps.SearchSpace( + { + "x": neps.HPOFloat(0.0, 1.0, prior=0.5, prior_confidence="medium"), + "y": neps.HPOInteger(1, 10, prior=5, prior_confidence="high"), + "z": neps.HPOCategorical(["a", "b", "c"], prior="b", prior_confidence="low"), + } + ) + + # Convert to NePS space + neps_space = convert_classic_to_neps_search_space(classic_space) + assert isinstance(neps_space, PipelineSpace) + + # Verify attributes are preserved + neps_attrs = neps_space.get_attrs() + assert len(neps_attrs) == 3 + assert all(name in neps_attrs for name in ["x", "y", "z"]) + + # Verify types and priors + assert isinstance(neps_attrs["x"], Float) + assert neps_attrs["x"].has_prior + assert neps_attrs["x"].prior == 0.5 + + assert isinstance(neps_attrs["y"], Integer) + assert neps_attrs["y"].has_prior + assert neps_attrs["y"].prior == 5 + + assert isinstance(neps_attrs["z"], Categorical) + assert neps_attrs["z"].has_prior + assert neps_attrs["z"].prior == 1 # Index of "b" in choices + + # Convert back to classic space + converted_back = convert_neps_to_classic_search_space(neps_space) + assert converted_back is not None + assert isinstance(converted_back, neps.SearchSpace) + + # Verify round-trip conversion preserves structure + classic_attrs = converted_back.elements + assert len(classic_attrs) == 3 + assert all(name in classic_attrs for name in ["x", "y", "z"]) + + +# Test algorithm compatibility checking +def test_algorithm_compatibility(): + """Test algorithm compatibility with different space types.""" + # Test NePS-only algorithms + neps_only_algorithms = [ + algorithms.neps_random_search, + algorithms.neps_hyperband, + 
algorithms.complex_random_search, + ] + + for algo in neps_only_algorithms: + compatibility = check_neps_space_compatibility(algo) + assert compatibility in [ + "neps", + "both", + ], f"Algorithm {algo.__name__} should be neps or both compatible" + + # Test classic algorithms that should work with both + both_compatible_algorithms = [ + algorithms.random_search, + algorithms.hyperband, + ] + + for algo in both_compatible_algorithms: + compatibility = check_neps_space_compatibility(algo) + assert compatibility in [ + "classic", + "both", + ], f"Algorithm {algo.__name__} should be classic or both compatible" + + +# Test with complex PipelineSpace containing Operations and Resampled +def test_complex_neps_space_features(): + """Test complex NePS space features that cannot be converted to classic.""" + + class ComplexNepsSpace(PipelineSpace): + # Basic parameters + factor = Float( + min_value=0.1, + max_value=2.0, + prior=1.0, + prior_confidence=ConfidenceLevel.MEDIUM, + ) + + # Operation with resampled parameters + operation = Operation( + operator=lambda x, y: x * y, + args=(factor, Resampled(factor)), + ) + + # Categorical with operations as choices + choice = Categorical( + choices=(operation, factor), + prior=0, + prior_confidence=ConfidenceLevel.LOW, + ) + + space = ComplexNepsSpace() + + # This space should NOT be convertible to classic + converted = convert_neps_to_classic_search_space(space) + assert converted is None, "Complex NePS space should not be convertible to classic" + + # But should work with NePS-compatible algorithms + compatibility = check_neps_space_compatibility(algorithms.neps_random_search) + assert compatibility in ["neps", "both"] + + +# Test trajectory and metrics functionality +def test_trajectory_and_metrics(tmp_path): + """Test extended trajectory and best_config functionality.""" + + def evaluate_with_metrics(x: float, y: int) -> dict: + """Evaluation function that returns multiple metrics.""" + return { + "objective_to_minimize": x + 
y, + "accuracy": 1.0 - (x + y) / 11.0, # Dummy accuracy metric + "training_time": x * 10, # Dummy training time + "memory_usage": y * 100, # Dummy memory usage + } + + class MetricsSpace(PipelineSpace): + x = Float(min_value=0.0, max_value=1.0) + y = Integer(min_value=1, max_value=10) + + space = MetricsSpace() + root_directory = tmp_path / "metrics_test" + + # Run optimization + neps.run( + evaluate_pipeline=evaluate_with_metrics, + pipeline_space=space, + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=5, + overwrite_root_directory=True, + ) + + # Check that trajectory and best_config files exist and contain extended metrics + trajectory_file = root_directory / "summary" / "best_config_trajectory.txt" + best_config_file = root_directory / "summary" / "best_config.txt" + + assert trajectory_file.exists(), "Trajectory file should exist" + assert best_config_file.exists(), "Best config file should exist" + + # Read and verify trajectory contains the standard format (not extended metrics in txt files) + trajectory_content = trajectory_file.read_text() + assert "Config ID:" in trajectory_content, "Trajectory should contain Config ID" + assert "Objective to minimize:" in trajectory_content, ( + "Trajectory should contain objective" + ) + assert "Cumulative evaluations:" in trajectory_content, ( + "Trajectory should contain cumulative evaluations" + ) + + # Read and verify best config contains the standard format + best_config_content = best_config_file.read_text() + assert "Config ID:" in best_config_content, "Best config should contain Config ID" + assert "Objective to minimize:" in best_config_content, ( + "Best config should contain objective" + ) diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index faf268c4c..eda19c37f 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ 
b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -103,7 +103,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, - cost_to_spend=1000, + cost_to_spend=100, # Reduced from 1000 to make tests faster overwrite_root_directory=True, ) neps.status(root_directory, print_summary=True) @@ -143,7 +143,7 @@ def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, - cost_to_spend=1000, + cost_to_spend=100, # Reduced from 1000 to make tests faster overwrite_root_directory=True, ) neps.status(root_directory, print_summary=True) diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index 62fbe3d7e..b95463cda 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -87,7 +87,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, - evaluations_to_spend=200, + evaluations_to_spend=100, overwrite_root_directory=True, ) neps.status(root_directory, print_summary=True) @@ -124,7 +124,7 @@ def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, - evaluations_to_spend=200, + evaluations_to_spend=100, overwrite_root_directory=True, ) neps.status(root_directory, print_summary=True) diff --git a/tests/test_neps_space/test_pipeline_space_methods.py b/tests/test_neps_space/test_pipeline_space_methods.py new file mode 100644 index 000000000..5532257e9 --- /dev/null +++ b/tests/test_neps_space/test_pipeline_space_methods.py @@ -0,0 +1,396 @@ +"""Tests for 
PipelineSpace dynamic methods (add, remove, add_prior).""" + +from __future__ import annotations + +import pytest + +from neps.space.neps_spaces.parameters import ( + Categorical, + ConfidenceLevel, + Fidelity, + Float, + Integer, + Operation, + PipelineSpace, + Resampled, +) + + +class BasicSpace(PipelineSpace): + """Basic space for testing dynamic methods.""" + + x = Float(min_value=0.0, max_value=1.0) + y = Integer(min_value=1, max_value=10) + z = Categorical(choices=("a", "b", "c")) + + +class SpaceWithPriors(PipelineSpace): + """Space with existing priors for testing.""" + + x = Float( + min_value=0.0, max_value=1.0, prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM + ) + y = Integer(min_value=1, max_value=10, prior=5, prior_confidence=ConfidenceLevel.HIGH) + z = Categorical( + choices=("a", "b", "c"), prior=1, prior_confidence=ConfidenceLevel.LOW + ) + + +# ===== Test add method ===== + + +def test_add_method_basic(): + """Test basic functionality of the add method.""" + space = BasicSpace() + original_attrs = space.get_attrs() + + # Add a new parameter + new_param = Float(min_value=10.0, max_value=20.0) + updated_space = space.add(new_param, "new_float") + + # Original space should be unchanged + assert space.get_attrs() == original_attrs + + # Updated space should have the new parameter + updated_attrs = updated_space.get_attrs() + assert "new_float" in updated_attrs + assert updated_attrs["new_float"] is new_param + assert len(updated_attrs) == len(original_attrs) + 1 + + +def test_add_method_different_types(): + """Test adding different parameter types.""" + space = BasicSpace() + + # Add Integer + space = space.add(Integer(min_value=0, max_value=100), "new_int") + assert "new_int" in space.get_attrs() + assert isinstance(space.get_attrs()["new_int"], Integer) + + # Add Categorical + space = space.add(Categorical(choices=(True, False)), "new_cat") + assert "new_cat" in space.get_attrs() + assert isinstance(space.get_attrs()["new_cat"], Categorical) + + # 
Add Operation + op = Operation(operator=lambda x: x * 2, args=(space.get_attrs()["x"],)) + space = space.add(op, "new_op") + assert "new_op" in space.get_attrs() + assert isinstance(space.get_attrs()["new_op"], Operation) + + # Add Resampled + resampled = Resampled(space.get_attrs()["x"]) + space = space.add(resampled, "new_resampled") + assert "new_resampled" in space.get_attrs() + assert isinstance(space.get_attrs()["new_resampled"], Resampled) + + +def test_add_method_with_default_name(): + """Test add method with automatic name generation.""" + space = BasicSpace() + original_count = len(space.get_attrs()) + + # Add without specifying name + new_param = Float(min_value=5.0, max_value=15.0) + updated_space = space.add(new_param) + + updated_attrs = updated_space.get_attrs() + assert len(updated_attrs) == original_count + 1 + + # Should have generated a name like "param_4" + generated_names = [name for name in updated_attrs if name.startswith("param_")] + assert len(generated_names) >= 1 + + +def test_add_method_duplicate_parameter(): + """Test adding a parameter with an existing name but same content.""" + space = BasicSpace() + + # Add the same parameter that already exists + existing_param = space.get_attrs()["x"] + updated_space = space.add(existing_param, "x") + + # Should work without error + assert updated_space.get_attrs()["x"] is existing_param + + +def test_add_method_conflicting_parameter(): + """Test adding a different parameter with an existing name.""" + space = BasicSpace() + + # Try to add a different parameter with existing name + different_param = Integer(min_value=0, max_value=5) # Different from existing "x" + + with pytest.raises(ValueError, match="A different parameter with the name"): + space.add(different_param, "x") + + +def test_add_method_chaining(): + """Test chaining multiple add operations.""" + space = BasicSpace() + + # Chain multiple additions + final_space = ( + space.add(Float(min_value=100.0, max_value=200.0), "param1") + 
.add(Integer(min_value=0, max_value=50), "param2") + .add(Categorical(choices=(1, 2, 3)), "param3") + ) + + attrs = final_space.get_attrs() + assert "param1" in attrs + assert "param2" in attrs + assert "param3" in attrs + assert len(attrs) == 6 # 3 original + 3 new + + +# ===== Test remove method ===== + + +def test_remove_method_basic(): + """Test basic functionality of the remove method.""" + space = BasicSpace() + original_attrs = space.get_attrs() + + # Remove a parameter + updated_space = space.remove("y") + + # Original space should be unchanged + assert space.get_attrs() == original_attrs + + # Updated space should not have the removed parameter + updated_attrs = updated_space.get_attrs() + assert "y" not in updated_attrs + assert "x" in updated_attrs + assert "z" in updated_attrs + assert len(updated_attrs) == len(original_attrs) - 1 + + +def test_remove_method_nonexistent_parameter(): + """Test removing a parameter that doesn't exist.""" + space = BasicSpace() + + with pytest.raises(ValueError, match="No parameter with the name"): + space.remove("nonexistent") + + +def test_remove_method_chaining(): + """Test chaining multiple remove operations.""" + space = BasicSpace() + + # Chain multiple removals + final_space = space.remove("x").remove("y") + + attrs = final_space.get_attrs() + assert "x" not in attrs + assert "y" not in attrs + assert "z" in attrs + assert len(attrs) == 1 + + +def test_remove_all_parameters(): + """Test removing all parameters from a space.""" + space = BasicSpace() + + # Remove all parameters + empty_space = space.remove("x").remove("y").remove("z") + + attrs = empty_space.get_attrs() + assert len(attrs) == 0 + + +# ===== Test add_prior method ===== + + +def test_add_prior_method_basic(): + """Test basic functionality of the add_prior method.""" + space = BasicSpace() + space.get_attrs() + + # Add prior to a parameter without prior + updated_space = space.add_prior("x", prior=0.5, prior_confidence=ConfidenceLevel.HIGH) + + # 
Original space should be unchanged + original_x = space.get_attrs()["x"] + assert not original_x.has_prior + + # Updated space should have the prior + updated_x = updated_space.get_attrs()["x"] + assert updated_x.has_prior + assert updated_x.prior == 0.5 + assert updated_x.prior_confidence == ConfidenceLevel.HIGH + + +def test_add_prior_method_different_types(): + """Test adding priors to different parameter types.""" + space = BasicSpace() + + # Add prior to Float + space = space.add_prior("x", prior=0.75, prior_confidence=ConfidenceLevel.MEDIUM) + x_param = space.get_attrs()["x"] + assert x_param.has_prior + assert x_param.prior == 0.75 + + # Add prior to Integer + space = space.add_prior("y", prior=7, prior_confidence=ConfidenceLevel.HIGH) + y_param = space.get_attrs()["y"] + assert y_param.has_prior + assert y_param.prior == 7 + + # Add prior to Categorical + space = space.add_prior("z", prior=2, prior_confidence=ConfidenceLevel.LOW) + z_param = space.get_attrs()["z"] + assert z_param.has_prior + assert z_param.prior == 2 + + +def test_add_prior_method_string_confidence(): + """Test add_prior with string confidence levels.""" + space = BasicSpace() + + # Test with string confidence levels + space = space.add_prior("x", prior=0.3, prior_confidence="low") + x_param = space.get_attrs()["x"] + assert x_param.has_prior + assert x_param.prior == 0.3 + assert x_param.prior_confidence == ConfidenceLevel.LOW + + space = space.add_prior("y", prior=8, prior_confidence="medium") + y_param = space.get_attrs()["y"] + assert y_param.prior_confidence == ConfidenceLevel.MEDIUM + + space = space.add_prior("z", prior=0, prior_confidence="high") + z_param = space.get_attrs()["z"] + assert z_param.prior_confidence == ConfidenceLevel.HIGH + + +def test_add_prior_method_nonexistent_parameter(): + """Test adding prior to a parameter that doesn't exist.""" + space = BasicSpace() + + with pytest.raises(ValueError, match="No parameter with the name"): + space.add_prior("nonexistent", 
prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM) + + +def test_add_prior_method_already_has_prior(): + """Test adding prior to a parameter that already has one.""" + space = SpaceWithPriors() + + with pytest.raises(ValueError, match="already has a prior"): + space.add_prior("x", prior=0.8, prior_confidence=ConfidenceLevel.LOW) + + +def test_add_prior_method_unsupported_type(): + """Test adding prior to unsupported parameter types.""" + # Create space with an Operation (which doesn't support priors) + space = BasicSpace() + op = Operation(operator=lambda x: x * 2, args=(space.get_attrs()["x"],)) + space = space.add(op, "operation_param") + + with pytest.raises(ValueError, match="does not support priors"): + space.add_prior( + "operation_param", prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM + ) + + +# ===== Test combined operations ===== + + +def test_combined_operations(): + """Test combining add, remove, and add_prior operations.""" + space = BasicSpace() + + # Complex chain of operations + final_space = ( + space.add(Float(min_value=50.0, max_value=100.0), "new_param") + .remove("y") + .add_prior("x", prior=0.25, prior_confidence=ConfidenceLevel.HIGH) + .add_prior("new_param", prior=75.0, prior_confidence=ConfidenceLevel.MEDIUM) + .add(Integer(min_value=0, max_value=10), "another_param") + ) + + attrs = final_space.get_attrs() + + # Check structure + assert "x" in attrs + assert "y" not in attrs # Removed + assert "z" in attrs + assert "new_param" in attrs + assert "another_param" in attrs + + # Check priors + assert attrs["x"].has_prior + assert attrs["x"].prior == 0.25 + assert attrs["new_param"].has_prior + assert attrs["new_param"].prior == 75.0 + assert not attrs["z"].has_prior + assert not attrs["another_param"].has_prior + + +def test_immutability(): + """Test that all operations return new instances and don't modify originals.""" + original_space = BasicSpace() + original_attrs = original_space.get_attrs() + + # Perform various operations + space1 
= original_space.add(Float(min_value=0.0, max_value=1.0), "temp") + space2 = original_space.remove("x") + space3 = original_space.add_prior("y", prior=5, prior_confidence=ConfidenceLevel.HIGH) + + # Original should be unchanged + assert original_space.get_attrs() == original_attrs + assert not original_space.get_attrs()["y"].has_prior + + # Each operation should create different instances + assert space1 is not original_space + assert space2 is not original_space + assert space3 is not original_space + assert space1 is not space2 + assert space2 is not space3 + + +def test_fidelity_operations(): + """Test operations with fidelity parameters.""" + + class FidelitySpace(PipelineSpace): + x = Float(min_value=0.0, max_value=1.0) + epochs = Fidelity(Integer(min_value=1, max_value=100)) + + space = FidelitySpace() + + # Add another parameter (non-fidelity since add doesn't support Fidelity directly) + new_param = Integer(min_value=1, max_value=50) + space = space.add(new_param, "batch_size") + + # Check that original fidelity is preserved + fidelity_attrs = space.fidelity_attrs + assert "epochs" in fidelity_attrs + assert len(fidelity_attrs) == 1 + + # Remove the fidelity parameter + space = space.remove("epochs") + fidelity_attrs = space.fidelity_attrs + assert "epochs" not in fidelity_attrs + assert len(fidelity_attrs) == 0 + + # Regular parameters should still be there + regular_attrs = space.get_attrs() + assert "x" in regular_attrs + assert "batch_size" in regular_attrs + + +def test_space_string_representation(): + """Test that string representation works after operations.""" + space = BasicSpace() + + # Perform operations + modified_space = ( + space.add(Float(min_value=10.0, max_value=20.0), "added_param") + .remove("y") + .add_prior("x", prior=0.8, prior_confidence=ConfidenceLevel.LOW) + ) + + # Should be able to get string representation without error + str_repr = str(modified_space) + assert "PipelineSpace" in str_repr + assert "added_param" in str_repr + 
assert "y" not in str_repr # Should be removed diff --git a/tests/test_neps_space/test_search_space__nos_like.py b/tests/test_neps_space/test_search_space__nos_like.py index ca882bb2a..4d39c9d19 100644 --- a/tests/test_neps_space/test_search_space__nos_like.py +++ b/tests/test_neps_space/test_search_space__nos_like.py @@ -121,7 +121,7 @@ def test_resolve(): pipeline = NosBench() try: - resolved_pipeline, resolution_context = neps_space.resolve(pipeline) + resolved_pipeline, _ = neps_space.resolve(pipeline) except RecursionError: pytest.xfail("XFAIL due to too much recursion.") raise diff --git a/tests/test_neps_space/test_space_conversion_and_compatibility.py b/tests/test_neps_space/test_space_conversion_and_compatibility.py new file mode 100644 index 000000000..360c57374 --- /dev/null +++ b/tests/test_neps_space/test_space_conversion_and_compatibility.py @@ -0,0 +1,495 @@ +"""Tests for space conversion and algorithm compatibility in NePS.""" + +from __future__ import annotations + +import pytest + +import neps +from neps.optimizers import algorithms +from neps.space.neps_spaces.neps_space import ( + check_neps_space_compatibility, + convert_classic_to_neps_search_space, + convert_neps_to_classic_search_space, +) +from neps.space.neps_spaces.parameters import ( + Categorical, + ConfidenceLevel, + Fidelity, + Float, + Integer, + Operation, + PipelineSpace, + Resampled, +) + + +class SimpleHPOSpace(PipelineSpace): + """Simple hyperparameter-only space that can be converted to classic.""" + + x = Float( + min_value=0.0, max_value=1.0, prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM + ) + y = Integer(min_value=1, max_value=10, prior=5, prior_confidence=ConfidenceLevel.HIGH) + z = Categorical( + choices=("a", "b", "c"), prior=1, prior_confidence=ConfidenceLevel.LOW + ) + + +class SimpleHPOWithFidelitySpace(PipelineSpace): + """Simple hyperparameter space with fidelity.""" + + x = Float( + min_value=0.0, max_value=1.0, prior=0.5, 
prior_confidence=ConfidenceLevel.MEDIUM + ) + y = Integer(min_value=1, max_value=10, prior=5, prior_confidence=ConfidenceLevel.HIGH) + epochs = Fidelity(Integer(min_value=1, max_value=100)) + + +class ComplexNepsSpace(PipelineSpace): + """Complex NePS space that cannot be converted to classic.""" + + # Basic parameters + factor = Float( + min_value=0.1, max_value=2.0, prior=1.0, prior_confidence=ConfidenceLevel.MEDIUM + ) + + # Operation with resampled parameters + operation = Operation( + operator=lambda x, y: x * y, + args=(factor, Resampled(factor)), + ) + + # Categorical with operations as choices + choice = Categorical( + choices=(operation, factor), + prior=0, + prior_confidence=ConfidenceLevel.LOW, + ) + + +# ===== Test space conversion functions ===== + + +def test_convert_classic_to_neps(): + """Test conversion from classic SearchSpace to NePS PipelineSpace.""" + # Create a classic SearchSpace with various parameter types + classic_space = neps.SearchSpace( + { + "float_param": neps.HPOFloat(0.0, 1.0, prior=0.5, prior_confidence="medium"), + "int_param": neps.HPOInteger(1, 10, prior=5, prior_confidence="high"), + "cat_param": neps.HPOCategorical( + ["a", "b", "c"], prior="b", prior_confidence="low" + ), + "fidelity_param": neps.HPOInteger(1, 100, is_fidelity=True), + "constant_param": neps.HPOConstant("constant_value"), + } + ) + + # Convert to NePS space + neps_space = convert_classic_to_neps_search_space(classic_space) + assert isinstance(neps_space, PipelineSpace) + + # Verify attributes are preserved + neps_attrs = neps_space.get_attrs() + assert len(neps_attrs) == 5 + assert all( + name in neps_attrs + for name in [ + "float_param", + "int_param", + "cat_param", + "fidelity_param", + "constant_param", + ] + ) + + # Verify types and properties + assert isinstance(neps_attrs["float_param"], Float) + assert neps_attrs["float_param"].has_prior + assert neps_attrs["float_param"].prior == 0.5 + assert neps_attrs["float_param"].prior_confidence == 
ConfidenceLevel.MEDIUM + + assert isinstance(neps_attrs["int_param"], Integer) + assert neps_attrs["int_param"].has_prior + assert neps_attrs["int_param"].prior == 5 + assert neps_attrs["int_param"].prior_confidence == ConfidenceLevel.HIGH + + assert isinstance(neps_attrs["cat_param"], Categorical) + assert neps_attrs["cat_param"].has_prior + assert neps_attrs["cat_param"].prior == 1 # Index of "b" in choices + assert neps_attrs["cat_param"].prior_confidence == ConfidenceLevel.LOW + + assert isinstance(neps_attrs["fidelity_param"], Fidelity) + assert isinstance(neps_attrs["fidelity_param"]._domain, Integer) + + # Constant should be preserved as-is + assert neps_attrs["constant_param"] == "constant_value" + + +def test_convert_neps_to_classic_simple(): + """Test conversion from simple NePS PipelineSpace to classic SearchSpace.""" + space = SimpleHPOSpace() + + # Convert to classic space + classic_space = convert_neps_to_classic_search_space(space) + assert classic_space is not None + assert isinstance(classic_space, neps.SearchSpace) + + # Verify attributes are preserved + classic_attrs = classic_space.elements + assert len(classic_attrs) == 3 + assert all(name in classic_attrs for name in ["x", "y", "z"]) + + # Verify types and priors + x_param = classic_attrs["x"] + assert isinstance(x_param, neps.HPOFloat) + assert x_param.lower == 0.0 + assert x_param.upper == 1.0 + assert x_param.prior == 0.5 + assert x_param.prior_confidence == "medium" + + y_param = classic_attrs["y"] + assert isinstance(y_param, neps.HPOInteger) + assert y_param.lower == 1 + assert y_param.upper == 10 + assert y_param.prior == 5 + assert y_param.prior_confidence == "high" + + z_param = classic_attrs["z"] + assert isinstance(z_param, neps.HPOCategorical) + assert set(z_param.choices) == {"a", "b", "c"} # Order might vary + assert z_param.prior == "b" + assert z_param.prior_confidence == "low" + + +def test_convert_neps_to_classic_with_fidelity(): + """Test conversion from NePS PipelineSpace 
with fidelity to classic SearchSpace.""" + space = SimpleHPOWithFidelitySpace() + + # Convert to classic space + classic_space = convert_neps_to_classic_search_space(space) + assert classic_space is not None + assert isinstance(classic_space, neps.SearchSpace) + + # Verify fidelity parameter + epochs_param = classic_space.elements["epochs"] + assert isinstance(epochs_param, neps.HPOInteger) + assert epochs_param.is_fidelity + assert epochs_param.lower == 1 + assert epochs_param.upper == 100 + + +def test_convert_complex_neps_to_classic_fails(): + """Test that complex NePS spaces cannot be converted to classic.""" + space = ComplexNepsSpace() + + # This space should NOT be convertible to classic + converted = convert_neps_to_classic_search_space(space) + assert converted is None + + +def test_round_trip_conversion(): + """Test that simple spaces can be converted back and forth.""" + # Start with classic space + original_classic = neps.SearchSpace( + { + "x": neps.HPOFloat(0.0, 1.0, prior=0.5, prior_confidence="medium"), + "y": neps.HPOInteger(1, 10, prior=5, prior_confidence="high"), + "z": neps.HPOCategorical(["a", "b", "c"], prior="b", prior_confidence="low"), + } + ) + + # Convert to NePS and back + neps_space = convert_classic_to_neps_search_space(original_classic) + converted_back = convert_neps_to_classic_search_space(neps_space) + + assert converted_back is not None + assert len(converted_back.elements) == len(original_classic.elements) + + # Verify parameters are equivalent + for name in original_classic.elements: + original_param = original_classic.elements[name] + converted_param = converted_back.elements[name] + + assert type(original_param) is type(converted_param) + + # Check bounds for numerical parameters + if isinstance(original_param, neps.HPOFloat | neps.HPOInteger): + assert original_param.lower == converted_param.lower + assert original_param.upper == converted_param.upper + + # Check choices for categorical parameters + if 
isinstance(original_param, neps.HPOCategorical): + # Sort choices for comparison since order might differ + assert set(original_param.choices) == set(converted_param.choices) + + # Check priors + if hasattr(original_param, "prior") and hasattr(converted_param, "prior"): + assert original_param.prior == converted_param.prior + + +# ===== Test algorithm compatibility ===== + + +def test_neps_only_algorithms(): + """Test that NePS-only algorithms are correctly identified.""" + neps_only_algorithms = [ + algorithms.neps_random_search, + algorithms.neps_hyperband, + algorithms.complex_random_search, + algorithms.neps_priorband, + ] + + for algo in neps_only_algorithms: + compatibility = check_neps_space_compatibility(algo) + assert compatibility in [ + "neps", + "both", + ], f"Algorithm {algo.__name__} should be neps or both compatible" + + +def test_classic_and_both_algorithms(): + """Test that classic algorithms that work with both spaces are correctly identified.""" + both_compatible_algorithms = [ + algorithms.random_search, + algorithms.hyperband, + algorithms.priorband, + ] + + for algo in both_compatible_algorithms: + compatibility = check_neps_space_compatibility(algo) + assert compatibility in [ + "classic", + "both", + ], f"Algorithm {algo.__name__} should be classic or both compatible" + + +def test_algorithm_compatibility_with_string_names(): + """Test algorithm compatibility checking with string names.""" + # Note: String-based compatibility checking may not be fully implemented + # Test with actual algorithm functions instead + + # Test NePS-only algorithms + neps_only_algorithms = [ + algorithms.neps_random_search, + algorithms.neps_hyperband, + algorithms.complex_random_search, + ] + + for algo in neps_only_algorithms: + compatibility = check_neps_space_compatibility(algo) + assert compatibility in [ + "neps", + "both", + ], f"Algorithm {algo.__name__} should be neps or both compatible" + + # Test classic/both algorithms + classic_algorithms = [ + 
algorithms.random_search, + algorithms.hyperband, + ] + + for algo in classic_algorithms: + compatibility = check_neps_space_compatibility(algo) + assert compatibility in [ + "classic", + "both", + ], f"Algorithm {algo.__name__} should be classic or both compatible" + + +def test_algorithm_compatibility_with_tuples(): + """Test algorithm compatibility checking with tuple configurations.""" + # Test with tuple configuration + neps_config = ("neps_random_search", {"ignore_fidelity": True}) + compatibility = check_neps_space_compatibility(neps_config) + assert compatibility in ["neps", "both"] + + classic_config = ("random_search", {"some_param": "value"}) + compatibility = check_neps_space_compatibility(classic_config) + assert compatibility in ["classic", "both"] + + +def test_auto_algorithm_compatibility(): + """Test that 'auto' algorithm is handled correctly.""" + compatibility = check_neps_space_compatibility("auto") + assert compatibility == "both" + + +# ===== Test NePS hyperband specific functionality ===== + + +def test_neps_hyperband_requires_fidelity(): + """Test that neps_hyperband requires fidelity parameters.""" + # Space without fidelity should fail + space_no_fidelity = SimpleHPOSpace() + + with pytest.raises((ValueError, AssertionError)): + algorithms.neps_hyperband(pipeline_space=space_no_fidelity) + + +def test_neps_hyperband_accepts_fidelity_space(): + """Test that neps_hyperband accepts spaces with fidelity.""" + space_with_fidelity = SimpleHPOWithFidelitySpace() + + # Should not raise an error + optimizer = algorithms.neps_hyperband(pipeline_space=space_with_fidelity) + assert optimizer is not None + + +def test_neps_hyperband_rejects_classic_space(): + """Test that neps_hyperband rejects classic SearchSpace.""" + # Type system should prevent this at compile time + # Instead, test that type checking works as expected + + # Create a proper NePS space that should work + class TestSpace(PipelineSpace): + x = Float(0.0, 1.0) + epochs = 
Fidelity(Integer(1, 100)) + + space = TestSpace() + + # This should work fine with proper NePS space + optimizer = algorithms.neps_hyperband(pipeline_space=space, eta=3) + assert optimizer is not None + + +@pytest.mark.parametrize("eta", [2, 3, 4, 5]) +def test_neps_hyperband_eta_values(eta): + """Test neps_hyperband with different eta values.""" + space = SimpleHPOWithFidelitySpace() + optimizer = algorithms.neps_hyperband(pipeline_space=space, eta=eta) + assert optimizer is not None + + +@pytest.mark.parametrize("sampler", ["uniform", "prior"]) +def test_neps_hyperband_samplers(sampler): + """Test neps_hyperband with different samplers.""" + space = SimpleHPOWithFidelitySpace() + optimizer = algorithms.neps_hyperband(pipeline_space=space, sampler=sampler) + assert optimizer is not None + + +@pytest.mark.parametrize("sample_prior_first", [False, True, "highest_fidelity"]) +def test_neps_hyperband_sample_prior_first(sample_prior_first): + """Test neps_hyperband with different sample_prior_first options.""" + space = SimpleHPOWithFidelitySpace() + optimizer = algorithms.neps_hyperband( + pipeline_space=space, sample_prior_first=sample_prior_first + ) + assert optimizer is not None + + +# ===== Test space compatibility with different optimizers ===== + + +def test_simple_space_works_with_both_optimizers(): + """Test that simple HPO spaces work with both classic and NePS optimizers.""" + space = SimpleHPOSpace() + + # Should work with NePS-only optimizers + neps_optimizer = algorithms.neps_random_search(pipeline_space=space) + assert neps_optimizer is not None + + # Should also be convertible and work with classic optimizers + converted_space = convert_neps_to_classic_search_space(space) + assert converted_space is not None + + classic_optimizer = algorithms.random_search( + pipeline_space=converted_space, use_priors=True + ) + assert classic_optimizer is not None + + +def test_complex_space_only_works_with_neps_optimizers(): + """Test that complex NePS spaces only 
work with NePS-compatible optimizers.""" + space = ComplexNepsSpace() + + # Should work with NePS optimizers + neps_optimizer = algorithms.neps_random_search(pipeline_space=space) + assert neps_optimizer is not None + + # Should NOT be convertible to classic + converted_space = convert_neps_to_classic_search_space(space) + assert converted_space is None + + +def test_fidelity_space_compatibility(): + """Test fidelity space compatibility with different optimizers.""" + space = SimpleHPOWithFidelitySpace() + + # Should work with neps_hyperband (requires fidelity) + hyperband_optimizer = algorithms.neps_hyperband(pipeline_space=space) + assert hyperband_optimizer is not None + + # Should also work with other NePS optimizers (but need to ignore fidelity) + random_optimizer = algorithms.neps_random_search( + pipeline_space=space, ignore_fidelity=True + ) + assert random_optimizer is not None + + # Should be convertible to classic for non-neps-specific algorithms + converted_space = convert_neps_to_classic_search_space(space) + assert converted_space is not None + + # Classic hyperband should work with converted space + classic_hyperband = algorithms.hyperband(pipeline_space=converted_space) + assert classic_hyperband is not None + + +# ===== Edge cases and error handling ===== + + +def test_conversion_preserves_log_scaling(): + """Test that log scaling is preserved during conversion.""" + classic_space = neps.SearchSpace( + { + "log_param": neps.HPOFloat(1e-5, 1e-1, log=True), + } + ) + + neps_space = convert_classic_to_neps_search_space(classic_space) + # Access the Float parameter and check if it has a _log attribute + log_param_neps = neps_space.get_attrs()["log_param"] + assert hasattr(log_param_neps, "_log") + assert log_param_neps._log is True + + # Round-trip conversion should now preserve log scaling + converted_back = convert_neps_to_classic_search_space(neps_space) + assert converted_back is not None + # Check the log property specifically for float parameters 
+ log_param = converted_back.elements["log_param"] + assert isinstance(log_param, neps.HPOFloat) + assert log_param.log is True + + +def test_conversion_handles_missing_priors(): + """Test that conversion works correctly when priors are missing.""" + classic_space = neps.SearchSpace( + { + "no_prior": neps.HPOFloat(0.0, 1.0), # No prior specified + } + ) + + neps_space = convert_classic_to_neps_search_space(classic_space) + param = neps_space.get_attrs()["no_prior"] + assert not param.has_prior + + converted_back = convert_neps_to_classic_search_space(neps_space) + assert converted_back is not None + # Check the prior property specifically for float parameters + no_prior_param = converted_back.elements["no_prior"] + assert isinstance(no_prior_param, neps.HPOFloat) + assert no_prior_param.prior is None + + +def test_conversion_handles_empty_spaces(): + """Test that conversion handles edge cases gracefully.""" + # Empty classic space + empty_classic = neps.SearchSpace({}) + neps_space = convert_classic_to_neps_search_space(empty_classic) + assert len(neps_space.get_attrs()) == 0 + + # Convert back + converted_back = convert_neps_to_classic_search_space(neps_space) + assert converted_back is not None + assert len(converted_back.elements) == 0 diff --git a/tests/test_neps_space/test_trajectory_and_metrics.py b/tests/test_neps_space/test_trajectory_and_metrics.py new file mode 100644 index 000000000..91b6de73e --- /dev/null +++ b/tests/test_neps_space/test_trajectory_and_metrics.py @@ -0,0 +1,505 @@ +"""Tests for extended trajectory and metrics functionality in NePS.""" + +from __future__ import annotations + +import re +import tempfile +from pathlib import Path + +import pytest + +import neps +from neps.optimizers import algorithms +from neps.space.neps_spaces.parameters import ( + Fidelity, + Float, + Integer, + PipelineSpace, +) + + +class SimpleSpace(PipelineSpace): + """Simple space for testing metrics functionality.""" + + x = Float(min_value=0.0, max_value=1.0) + 
y = Integer(min_value=1, max_value=10) + + +class SpaceWithFidelity(PipelineSpace): + """Space with fidelity for testing multi-fidelity metrics.""" + + x = Float(min_value=0.0, max_value=1.0) + y = Integer(min_value=1, max_value=10) + epochs = Fidelity(Integer(min_value=1, max_value=50)) + + +def simple_evaluation(x: float, y: int) -> dict: + """Simple evaluation function that returns multiple metrics.""" + return { + "objective_to_minimize": x + y, + "accuracy": max(0.0, 1.0 - (x + y) / 11.0), # Dummy accuracy metric + "training_time": x * 10 + y, # Dummy training time + "memory_usage": y * 100, # Dummy memory usage + "custom_metric": x * y, # Custom metric + } + + +def fidelity_evaluation(x: float, y: int, epochs: int) -> dict: + """Evaluation function with fidelity that affects metrics.""" + base_objective = x + y + fidelity_factor = epochs / 50.0 # Scale based on fidelity + + return { + "objective_to_minimize": ( + base_objective / fidelity_factor + ), # Better with more epochs + "accuracy": min(1.0, fidelity_factor * (1.0 - base_objective / 11.0)), + "training_time": epochs * (x * 10 + y), # More epochs = more time + "memory_usage": y * 100 + epochs * 10, # Memory increases with epochs + "convergence_rate": 1.0 / epochs, # Faster convergence with more epochs + "epochs_used": epochs, # Track actual epochs used + } + + +def failing_evaluation(x: float, y: int) -> dict: + """Evaluation that sometimes fails to test error handling.""" + if x > 0.8 or y > 8: + raise ValueError("Simulated failure for testing") + + return { + "objective_to_minimize": x + y, + "success_rate": 1.0, + } + + +# ===== Test basic trajectory and metrics ===== + + +def test_basic_trajectory_functionality(): + """Test basic trajectory functionality without checking specific file structure.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "basic_test" + + # Run optimization + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + 
optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Check that some optimization files were created + assert root_directory.exists() + + # Find the summary directory and check for result files + summary_dir = root_directory / "summary" + assert summary_dir.exists(), "Summary directory should exist" + + # Check for best config file + best_config_file = summary_dir / "best_config.txt" + assert best_config_file.exists(), "Best config file should exist" + + # Check if trajectory file contains our evaluation results + best_config_content = best_config_file.read_text() + assert "Objective to minimize" in best_config_content # Different casing + + # Check for CSV files that contain the optimization summary + csv_files = list(summary_dir.glob("*.csv")) + assert len(csv_files) > 0, "Should have CSV summary files" + + # Check that basic optimization data is present + csv_content = csv_files[0].read_text() + assert "objective_to_minimize" in csv_content, "Should contain objective values" + + +def test_best_config_with_multiple_metrics(): + """Test that best_config file contains multiple metrics.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "best_config_test" + + # Run optimization + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=5, + overwrite_root_directory=True, + ) + + # Check that best_config file exists + best_config_file = root_directory / "summary" / "best_config.txt" + assert best_config_file.exists(), "Best config file should exist" + + # Read and verify best config contains multiple metrics + best_config_content = best_config_file.read_text() + + # Should contain the primary objective + assert "Objective to minimize" in best_config_content + + # Note: Additional metrics may not be persisted to 
summary files + # They are used during evaluation but only the main objective is saved + # Should contain configuration parameters + assert ( + "x" in best_config_content or "SAMPLING__Resolvable.x" in best_config_content + ) + assert ( + "y" in best_config_content or "SAMPLING__Resolvable.y" in best_config_content + ) + + +def test_trajectory_with_fidelity(): + """Test trajectory with fidelity-based evaluation.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "fidelity_test" + + # Run optimization with fidelity + neps.run( + evaluate_pipeline=fidelity_evaluation, + pipeline_space=SpaceWithFidelity(), + optimizer=("neps_random_search", {"ignore_fidelity": True}), + root_directory=str(root_directory), + evaluations_to_spend=10, + overwrite_root_directory=True, + ) + + # Check trajectory file + trajectory_file = root_directory / "summary" / "best_config_trajectory.txt" + assert trajectory_file.exists() + + trajectory_content = trajectory_file.read_text() + + # Should contain basic optimization data + assert "Config ID" in trajectory_content + assert "Objective" in trajectory_content + + # Should track configuration parameters (including fidelity if preserved) + assert any( + param in trajectory_content + for param in ["x", "y", "epochs", "SAMPLING__Resolvable"] + ) + + +def test_cumulative_metrics_tracking(): + """Test that cumulative evaluations are tracked in trajectory files.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "cumulative_test" + + # Run optimization + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=5, + overwrite_root_directory=True, + ) + + # Read trajectory + trajectory_file = root_directory / "summary" / "best_config_trajectory.txt" + trajectory_content = trajectory_file.read_text() + + # Should have the expected header + assert ( + 
"Best configs and their objectives across evaluations:" in trajectory_content + ) + + # Should track cumulative evaluations + assert "Cumulative evaluations:" in trajectory_content + + # Should have multiple config entries (at least some evaluations) + config_count = trajectory_content.count("Config ID:") + assert config_count >= 1, "Should have at least one config entry" + + # Should have objective values + assert "Objective to minimize:" in trajectory_content + + +# ===== Test error handling in metrics ===== + + +def test_trajectory_with_failed_evaluations(): + """Test that trajectory handles failed evaluations correctly.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "error_test" + + # Run optimization that will have some failures + neps.run( + evaluate_pipeline=failing_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=15, # More evaluations to ensure some failures + overwrite_root_directory=True, + ignore_errors=True, # Allow continuing after errors + ) + + # Check that trajectory file exists + trajectory_file = root_directory / "summary" / "best_config_trajectory.txt" + assert trajectory_file.exists() + + # Read trajectory + trajectory_content = trajectory_file.read_text() + lines = trajectory_content.strip().split("\n") + + # Should have at least some successful evaluations + assert len(lines) >= 2 # Header + at least one evaluation + + # Check that errors are handled gracefully + # (The exact behavior may vary, but the file should exist and be readable) + assert "Objective to minimize" in trajectory_content # Different casing + + +# ===== Test hyperband-specific metrics ===== + + +def test_neps_hyperband_metrics(): + """Test that neps_hyperband produces extended metrics.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "hyperband_test" + + # Run neps_hyperband optimization + neps.run( 
+ evaluate_pipeline=fidelity_evaluation, + pipeline_space=SpaceWithFidelity(), + optimizer=algorithms.neps_hyperband, + root_directory=str(root_directory), + fidelities_to_spend=20, # Use fidelities_to_spend for multi-fidelity optimizers + overwrite_root_directory=True, + ) + + # Check trajectory file + trajectory_file = root_directory / "summary" / "best_config_trajectory.txt" + assert trajectory_file.exists() + + trajectory_content = trajectory_file.read_text() + + # Should contain basic optimization data + assert "Objective" in trajectory_content + + # Should contain configuration information + assert any( + param in trajectory_content for param in ["epochs", "SAMPLING__Resolvable"] + ) + + # Should have multiple evaluations with different fidelities + lines = trajectory_content.strip().split("\n") + assert len(lines) >= 5 # Should have some evaluations + + +# ===== Test metrics with different optimizers ===== + + +@pytest.mark.parametrize( + "optimizer", + [ + algorithms.neps_random_search, + algorithms.complex_random_search, + ], +) +def test_metrics_with_different_optimizers(optimizer): + """Test that txt file format is consistent across different optimizers.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / f"optimizer_test_{optimizer.__name__}" + + # Run optimization + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=optimizer, + root_directory=str(root_directory), + evaluations_to_spend=5, + overwrite_root_directory=True, + ) + + # Check files exist + trajectory_file = root_directory / "summary" / "best_config_trajectory.txt" + best_config_file = root_directory / "summary" / "best_config.txt" + + assert trajectory_file.exists() + assert best_config_file.exists() + + # Check contents match expected txt format (only objective_to_minimize is tracked) + trajectory_content = trajectory_file.read_text() + best_config_content = best_config_file.read_text() + + # Both should contain 
the standard txt file format elements + for content in [trajectory_content, best_config_content]: + assert "Config ID:" in content + assert "Objective to minimize:" in content + assert "Cumulative evaluations:" in content + assert "Config:" in content + + # Trajectory file should have the header + assert ( + "Best configs and their objectives across evaluations:" in trajectory_content + ) + + +# ===== Test metric value validation ===== + + +def test_metric_values_are_reasonable(): + """Test that reported objective values are reasonable in txt files.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "validation_test" + + # Run optimization + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=5, + overwrite_root_directory=True, + ) + + # Read trajectory and parse objective values + trajectory_file = root_directory / "summary" / "best_config_trajectory.txt" + trajectory_content = trajectory_file.read_text() + + # Extract objective values from the actual txt format + objective_matches = re.findall( + r"Objective to minimize: ([\d.]+)", trajectory_content + ) + + # Check that we found some objectives + assert len(objective_matches) > 0, "No objective values found in trajectory" + + # Check each objective value is reasonable + for obj_str in objective_matches: + objective = float(obj_str) + # Objective should be in reasonable range (x+y where x in [0,1], y in [1,10]) + assert 1.0 <= objective <= 11.0, ( + f"Objective {objective} out of expected range [1.0, 11.0]" + ) + + +# ===== Test file format and structure ===== + + +def test_trajectory_file_format(): + """Test that trajectory txt file has correct format.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "format_test" + + # Run optimization + neps.run( + evaluate_pipeline=simple_evaluation, + 
pipeline_space=SimpleSpace(),
+            optimizer=algorithms.neps_random_search,
+            root_directory=str(root_directory),
+            evaluations_to_spend=3,
+            overwrite_root_directory=True,
+        )
+
+        # Check trajectory file format (txt format, not CSV)
+        trajectory_file = root_directory / "summary" / "best_config_trajectory.txt"
+        trajectory_content = trajectory_file.read_text()
+
+        # Should have the expected txt file structure
+        assert (
+            "Best configs and their objectives across evaluations:" in trajectory_content
+        )
+        assert "Config ID:" in trajectory_content
+        assert "Objective to minimize:" in trajectory_content
+        assert "Cumulative evaluations:" in trajectory_content
+        assert "Config:" in trajectory_content
+
+        # Should have separator lines
+        assert (
+            "--------------------------------------------------------------------------------"
+            in trajectory_content
+        )
+
+
+def test_results_directory_structure():
+    """Test that results directory has expected structure."""
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        root_directory = Path(tmp_dir) / "structure_test"
+
+        # Run optimization
+        neps.run(
+            evaluate_pipeline=simple_evaluation,
+            pipeline_space=SimpleSpace(),
+            optimizer=algorithms.neps_random_search,
+            root_directory=str(root_directory),
+            evaluations_to_spend=3,
+            overwrite_root_directory=True,
+        )
+
+        # Check directory structure
+        results_dir = root_directory / "summary"
+        assert results_dir.exists()
+        assert results_dir.is_dir()
+
+        # Check expected files
+        expected_files = ["best_config_trajectory.txt", "best_config.txt"]
+        for filename in expected_files:
+            file_path = results_dir / filename
+            assert file_path.exists(), f"Expected file {filename} should exist"
+            assert file_path.is_file(), f"{filename} should be a file"
+
+            # File should not be empty
+            content = file_path.read_text()
+            assert len(content.strip()) > 0, f"{filename} should not be empty"
+
+
+def test_neps_revisit_run_with_trajectory():
+    """Test that NePS can revisit an earlier run and use incumbent 
trajectory.""" + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "revisit_test" + + # First run - create initial optimization + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, # Start fresh + ) + + # Check that initial files were created + summary_dir = root_directory / "summary" + assert summary_dir.exists() + best_config_file = summary_dir / "best_config.txt" + trajectory_file = summary_dir / "best_config_trajectory.txt" + assert best_config_file.exists() + assert trajectory_file.exists() + + # Read initial trajectory + initial_trajectory = trajectory_file.read_text() + assert "Config ID:" in initial_trajectory + assert "Objective to minimize:" in initial_trajectory + + # Second run - revisit without overwriting + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=2, # Add 2 more evaluations + overwrite_root_directory=False, # Don't overwrite, continue from previous + ) + + # Check that trajectory was updated with new evaluations + updated_trajectory = trajectory_file.read_text() + + # The updated trajectory should contain the original entries plus new ones + assert len(updated_trajectory) >= len(initial_trajectory) + assert "Config ID:" in updated_trajectory + assert "Objective to minimize:" in updated_trajectory + + # Should have evidence of multiple evaluations + # Note: trajectory.txt only tracks BEST configs, not all evaluations + # So we check that the files still have the expected format and content + assert "Config ID:" in updated_trajectory + assert "Objective to minimize:" in updated_trajectory + + # The updated content should be at least as long (potentially with timing info added) + assert len(updated_trajectory) >= 
len(initial_trajectory), ( + "Updated trajectory should have at least the same content" + ) From 4ed7bda272472a9920332a869c20820d436bd4a7 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 23 Oct 2025 05:04:21 +0200 Subject: [PATCH 088/156] Remove old warmstarting functionality, unused example notebooks and scripts - Deleted `priors_test.ipynb` and `warmstarting.py` as they were no longer needed. --- neps/__init__.py | 3 +- neps/api.py | 248 +------------- neps_examples/basic_usage/algo_tests.ipynb | 217 ------------ neps_examples/basic_usage/priors_test.ipynb | 320 ------------------ neps_examples/efficiency/warmstarting.py | 96 ------ ...st_neps_integration_priorband__max_cost.py | 0 6 files changed, 5 insertions(+), 879 deletions(-) delete mode 100644 neps_examples/basic_usage/algo_tests.ipynb delete mode 100644 neps_examples/basic_usage/priors_test.ipynb delete mode 100644 neps_examples/efficiency/warmstarting.py rename {tests/test_neps_space => neps_examples/test_files}/test_neps_integration_priorband__max_cost.py (100%) diff --git a/neps/__init__.py b/neps/__init__.py index 11b46b7b7..be2a83791 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -5,7 +5,7 @@ and algorithms. 
""" -from neps.api import run, save_pipeline_results, warmstart_neps +from neps.api import run, save_pipeline_results from neps.optimizers import algorithms from neps.optimizers.ask_and_tell import AskAndTell from neps.optimizers.optimizer import SampledConfig @@ -51,5 +51,4 @@ "save_pipeline_results", "status", "tblogger", - "warmstart_neps", ] diff --git a/neps/api.py b/neps/api.py index 1e8dfecae..e1123f6ab 100644 --- a/neps/api.py +++ b/neps/api.py @@ -3,22 +3,14 @@ from __future__ import annotations import logging -import os -import shutil -import socket -import time import warnings -from collections.abc import Callable, Mapping, Sequence +from collections.abc import Callable, Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Concatenate, Literal -import neps from neps.optimizers import AskFunction, OptimizerChoice, load_optimizer -from neps.optimizers.ask_and_tell import AskAndTell from neps.runtime import _launch_runtime, _save_results -from neps.space.neps_spaces import neps_space from neps.space.neps_spaces.neps_space import ( - NepsCompatConverter, adjust_evaluation_pipeline_for_neps_space, check_neps_space_compatibility, convert_classic_to_neps_search_space, @@ -26,8 +18,6 @@ ) from neps.space.neps_spaces.parameters import PipelineSpace from neps.space.parsing import convert_to_space -from neps.state import NePSState, OptimizationState, SeedSnapshot -from neps.state.neps_state import TrialRepo from neps.state.pipeline_eval import EvaluatePipelineReturn from neps.status.status import post_run_csv from neps.utils.common import dynamic_load_object @@ -71,16 +61,6 @@ def run( # noqa: C901, D417, PLR0913, PLR0912, PLR0915 | CustomOptimizer | Literal["auto"] ) = "auto", - warmstart_configs: ( - list[ - tuple[ - dict[str, Any] | Mapping[str, Any], - dict[str, Any] | Mapping[str, Any], - Any, - ] - ] - | None - ) = None, ) -> None: """Run the optimization. 
@@ -340,22 +320,6 @@ def __call__( This is mainly meant for internal development but allows you to use the NePS runtime to run your optimizer. - warmstart_configs: A list of configurations to warmstart the NePS state with. - This is useful for testing and debugging purposes, where you want to - start with a set of predefined configurations and their results. - Each configuration is a tuple of three elements: - 1. A dictionary of the samplings to make, i.e. resolution_context.samplings_made - 2. A dictionary of the environment values, i.e. resolution_context.environment_values - 3. The result of the evaluation, which is the return value of the `evaluate_pipeline` - function, i.e. the objective value to minimize or a dictionary with - `"objective_to_minimize"` and `"cost"` keys. - - !!! warning "Warmstarting compatibility" - - The warmstarting feature is only compatible with the new NEPS optimizers, - such as `neps.algorithms.neps_random_search`, `neps.algorithms.neps_priorband`, - and `neps.algorithms.complex_random_search`. - """ # noqa: E501 if ( evaluations_to_spend is None @@ -407,17 +371,6 @@ def __call__( "Only one is allowed." ) - if warmstart_configs: - warmstart_neps( - root_directory=Path(root_directory), - pipeline_space=pipeline_space, - warmstart_configs=warmstart_configs, - optimizer=optimizer, - overwrite_root_directory=overwrite_root_directory, - inside_neps=True, - ) - overwrite_root_directory = False - logger.info(f"Starting neps.run using root directory {root_directory}") # Check if the pipeline_space only contains basic HPO parameters. 
@@ -428,10 +381,8 @@ def __call__( # If the optimizer is not a NEPS algorithm, we try to convert the pipeline_space neps_classic_space_compatibility = check_neps_space_compatibility(optimizer) - if ( - neps_classic_space_compatibility in ["both", "classic"] - and isinstance(pipeline_space, PipelineSpace) - and not warmstart_configs + if neps_classic_space_compatibility in ["both", "classic"] and isinstance( + pipeline_space, PipelineSpace ): converted_space = convert_neps_to_classic_search_space(pipeline_space) if converted_space: @@ -576,195 +527,4 @@ def save_pipeline_results( ) -def warmstart_neps( - pipeline_space: PipelineSpace, - root_directory: Path | str, - warmstart_configs: Sequence[ - tuple[ - dict[str, Any] | Mapping[str, Any], - dict[str, Any] | Mapping[str, Any], - EvaluatePipelineReturn, - ] - ], - overwrite_root_directory: bool = False, # noqa: FBT001, FBT002 - optimizer: ( - OptimizerChoice - | Mapping[str, Any] - | tuple[OptimizerChoice, Mapping[str, Any]] - | Callable[Concatenate[SearchSpace, ...], AskFunction] # Hack, while we transit - | Callable[Concatenate[PipelineSpace, ...], AskFunction] # from SearchSpace to - | Callable[Concatenate[SearchSpace | PipelineSpace, ...], AskFunction] # Pipeline - | CustomOptimizer - | Literal["auto"] - ) = "auto", - inside_neps: bool = False, # noqa: FBT001, FBT002 -) -> None: - """Warmstart the NePS state with given configurations. - This is useful for testing and debugging purposes, where you want to - start with a set of predefined configurations and their results. - - Args: - pipeline_space: The pipeline space to use for the warmstart. - root_directory: The path to the NePS state directory. - warmstart_configs: A list of tuples, where each tuple contains a configuration, - environment values, and the result of the evaluation. - The configuration is a dictionary of parameter values, the environment values - are also a dictionary, and the result is the evaluation result. 
- overwrite_root_directory: If True, the working directory will be deleted before - starting the warmstart. - - !!! warning "Repeated warmstarting" - - When not overwriting the working directory, starting multiple NePS - instances will result in an error. Instead, use warmstart_neps once - on its own and then start the NePS instances. - - optimizer: The optimizer to use for the warmstart. This can be a string, a - callable, or a tuple of a callable and a dictionary of parameters. - If "auto", the optimizer will be chosen based on the pipeline space. - - !!! warning "Warmstarting compatibility" - - The warmstarting feature is only compatible with the new NEPS optimizers, - such as `neps.algorithms.neps_random_search`, - `neps.algorithms.neps_priorband`, and - `neps.algorithms.complex_random_search`. - inside_neps: If True, the function is called from within the NEPS runtime. - This is used to avoid checking the compatibility of the optimizer with the - warmstarting feature, as this is already done in the NEPS runtime. - If False, the function will check if the optimizer is compatible with the - warmstarting feature and raise an error if it is not. - - Raises: - ValueError: If the optimizer is not compatible with the warmstarting feature. - ValueError: If the warmstart config already exists in the root directory. - """ - if not inside_neps and check_neps_space_compatibility(optimizer) != "neps": - raise ValueError( - "The provided optimizer is not compatible with the warmstarting feature. " - "Please use one that is, such as 'neps_random_search', 'neps_priorband', " - "or 'complex_random_search'." 
- ) - logger.info( - "Warmstarting neps.run with the provided" - f" {len(warmstart_configs)} configurations using root directory" - f" {root_directory}" - ) - root_directory = Path(root_directory) - if overwrite_root_directory and root_directory.is_dir(): - shutil.rmtree(root_directory) - optimizer_ask, optimizer_info = neps.optimizers.load_optimizer( - optimizer, pipeline_space - ) - state = NePSState.create_or_load( - root_directory, - optimizer_info=optimizer_info, - optimizer_state=OptimizationState( - budget=None, seed_snapshot=SeedSnapshot.new_capture(), shared_state={} - ), - ) - for n_config, (config, env, result) in enumerate(warmstart_configs): - try: - _, resolution_context = neps_space.resolve( - pipeline=pipeline_space, - domain_sampler=neps_space.OnlyPredefinedValuesSampler( - predefined_samplings=config - ), - environment_values=env, - ) - except ValueError as e: - logger.error( - "Failed to resolve the pipeline space with the provided config:" - f" {config} and env: {env}.", - ) - raise e - - ask_tell = AskAndTell(optimizer=optimizer_ask, worker_id="warmstart_worker") - if pipeline_space.fidelity_attrs and isinstance( - optimizer_ask, - neps.optimizers.neps_bracket_optimizer._NePSBracketOptimizer, - ): - rung_to_fid = optimizer_ask.rung_to_fid - fid_to_rung = { - v: max(k for k, val in rung_to_fid.items() if val == v) - for v in rung_to_fid.values() - } - fidelity_value = env[next(iter(pipeline_space.fidelity_attrs.keys()))] - highest_rung = max( - [ - fid_to_rung[small_key] - for small_key in [key for key in fid_to_rung if key <= fidelity_value] - ] - ) - for rung in range(highest_rung + 1): - # Store the config for each rung - config_path = f"{n_config}_rung_{rung}" - - # Check if result is a UserResultDict by checking its structure - if isinstance(result, dict) and "cost" in result: - # This is a UserResultDict-like dictionary - rung_result = result.copy() - rung_result["cost"] = rung_result.get("cost", 0) / (highest_rung + 1) # type: ignore - 
else: - # This is a simple numeric result - rung_result = result # type: ignore - trial = ask_tell.tell_custom( - config_id=config_path, - config=config, - result=rung_result, - time_sampled=time.time(), - time_started=time.time(), - time_end=time.time(), - previous_trial_id=f"{n_config}_rung_{rung - 1}" if rung > 0 else None, - location=root_directory / "configs" / config_path, - worker_id=f"worker_1-{socket.gethostname()}-{os.getpid()}", - ) - trial.config = NepsCompatConverter.to_neps_config(resolution_context) - if (root_directory / config_path).is_dir(): - raise ValueError( - f"Warmstart config {n_config} already exists in" - f" {root_directory}. Please remove it before running the" - " script again." - ) - TrialRepo(root_directory / "configs").store_new_trial(trial) - assert trial.report - assert trial.metadata.evaluating_worker_id - state.lock_and_report_trial_evaluation( - trial=trial, - report=trial.report, - worker_id=trial.metadata.evaluating_worker_id, - ) - logger.info( - f"Warmstarted config {config_path} with result: {rung_result}." - ) - - else: - config_path = f"{n_config}" - trial = ask_tell.tell_custom( - config_id=config_path, - config=config, - result=result, - time_sampled=time.time(), - time_started=time.time(), - time_end=time.time(), - location=root_directory / "configs" / config_path, - worker_id=f"worker_1-{socket.gethostname()}-{os.getpid()}", - ) - trial.config = NepsCompatConverter.to_neps_config(resolution_context) - if (root_directory / config_path).is_dir(): - raise ValueError( - f"Warmstart config {n_config} already exists in {root_directory}." - " Please remove it before running the script again." 
- ) - TrialRepo(root_directory / "configs").store_new_trial(trial) - assert trial.report - assert trial.metadata.evaluating_worker_id - state.lock_and_report_trial_evaluation( - trial=trial, - report=trial.report, - worker_id=trial.metadata.evaluating_worker_id, - ) - logger.info(f"Warmstarted config {config_path} with result: {result}.") - - -__all__ = ["run", "save_pipeline_results", "warmstart_neps"] +__all__ = ["run", "save_pipeline_results"] diff --git a/neps_examples/basic_usage/algo_tests.ipynb b/neps_examples/basic_usage/algo_tests.ipynb deleted file mode 100644 index 0564f4928..000000000 --- a/neps_examples/basic_usage/algo_tests.ipynb +++ /dev/null @@ -1,217 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "938adc12", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\Users\\Amega\\Git\\neps\\.venv\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], - "source": [ - "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled, Integer, Fidelity\n", - "import neps\n", - "\n", - "# Define the NEPS space for the neural network architecture\n", - "class SimpleSpace(PipelineSpace):\n", - " int_param1 = Fidelity(Integer(1,100))\n", - " int_param2 = Integer(1,100, prior=50, prior_confidence=\"medium\")\n", - " int_param3 = Integer(1,100, prior=50, prior_confidence=\"high\")\n", - " cat = Categorical(['option1', 'option2', 'option3'])#, prior=0, prior_confidence='low')\n", - "global_values = []\n", - "def evaluate_pipeline(int_param1, int_param2, *args, **kwargs):\n", - " # Dummy evaluation function\n", - " global_values.append(int_param1)\n", - " return {\"objective_to_minimize\": -int_param2/50 + int_param1,\n", - " \"cost\": int_param1}" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "89427fd0", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "# Configs: 20\n", - "\n", - " success: 20\n", - "\n", - "# Best Found (config 17_rung_0):\n", - "\n", - " objective_to_minimize: -1.0\n", - " config: {'int_param2': 100, 'int_param3': 54, 'cat': 'option3', 'int_param1': 1}\n", - " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\neps_test_runs\\algo_tests\\configs\\config_17_rung_0\n", - " cost: 1.0\n", - "\n" - ] - } - ], - "source": [ - "from neps.optimizers.utils.grid import make_grid\n", - "from pprint import pprint\n", - "from functools import partial\n", - "\n", - "# pprint(make_grid(SimpleSpace(), size_per_numerical_hp=2, ignore_fidelity=False))\n", - "\n", - "neps.run(\n", - " evaluate_pipeline,\n", - " SimpleSpace(),\n", - " root_directory=\"neps_test_runs/algo_tests\",\n", - " overwrite_root_directory=True,\n", - " optimizer=partial(neps.algorithms.hyperband),#, 
ignore_fidelity=True, size_per_numerical_dimension=5),\n", - " # fidelities_to_spend=40,\n", - " cost_to_spend=20\n", - ")\n", - "neps.status(\"neps_test_runs/algo_tests\",print_summary=True)\n", - "print()" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "c6197a65", - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAASxdJREFUeJzt3Qm8lGP/x/Ffe0ilXVpUpNKCSqVshYQs9fDwpCeVIrIU8mRpESJZo7IWqidCWf5EKgrtlDZUQtGm1FFo0fxf38tzj5k5M6dzTnPO3DPzeb+Mc2bmPjPX3DPN/bt/1++6rgKBQCBgAAAAPlIw0Q0AAACIRIACAAB8hwAFAAD4DgEKAADwHQIUAADgOwQoAADAdwhQAACA7xCgAAAA3ylsSWj//v32008/2eGHH24FChRIdHMAAEA2aG7YX3/91SpXrmwFCxZMvQBFwUnVqlUT3QwAAJAL69atsypVqqRegKLMifcCS5YsmejmAACAbMjIyHAJBu84nnIBiteto+CEAAUAgOSSnfIMimQBAIDvEKAAAADfIUABAAC+k5Q1KNkdyrRv3z77888/E90UAAlWqFAhK1y4MNMSAEkkJQOUPXv22IYNG+y3335LdFMA+MShhx5qRx55pBUtWjTRTQGQjgGKJnFbu3atO2PSRDD6MuKsCUhfyqbqpGXLli3uu+HYY4894ARRABIv5QIUfREpSNE4a50xAcAhhxxiRYoUse+//959RxQvXjzRTQJwACl7GsEZEoBQfCcAyYV/sQAAwHcIUBCV6namTJmS5TZXXXWVXXzxxeaH9n311VfWvHlzl7o/4YQTEtKmZPDdd9+5fbd48eJENwUA0qsGJSuPTvsm356rz9m1c7S9Dvbbt2/PFBR89NFHduaZZ9ovv/xipUuXDl736IBcs2ZNu+mmm6xnz55xa79GQR1xxBHBg1qNGjXsiy++8M3BP7R9MnDgQDvssMPs66+/thIlSiS0bQCAg5dWAUoq0YFY6xD9/vvv9vbbb1uvXr2sVq1a1qZNm7g8fqVKlczPItu3Zs0aO//886169eq5fkwVTzIE9cDYTwDyA108SapChQruIK3Mxo033uh+fv755zGHWZYvX95ee+214G3KhGhOCM8nn3xixYoVC84dE9qFoseWE0880d1+xhlnhD3+8OHD3WOVLVvWrr/+etu7d2+OuoVuvvnmsMfU73pN/fr1szJlyrjXOWjQoLC/CW2ffl+0aJHdc8897ndv26VLl1rr1q3dCA61TRmmnTt3ZmrLfffd54akH3fcccEukFdffdVOPfVU97dNmza1b775xhYsWGBNmjRxGZp27dq5YauxKOPVqVMnt9/1GBraOmbMmOD9t99+u9WuXduNNFMG7O677w7bb3oNeo9eeOEFq1atmnvO6667zk08OGzYMLdP9BlQ2yP3y6hRo1z79Lx67ND3PZply5a57fUcFStWtM6dO9vPP/8c9n707t3bvU/lypWztm3bZvl4ABAPBChJTsHH1KlT7YcffrBmzZpF3UYHrdNOO811D3kHz5UrV7rsi2o35OOPP3YH4mhDs+fPn+9+fvjhh65r5Y033gjeN3PmTJe90M8XX3zRxo4d6y4HS4+lLpt58+a5A7KCj2nTpkXdVm
06/vjj7ZZbbnG/33rrrbZr1y53IFU3kAKLSZMmufbrQBtq+vTpLhulx37nnXfCuozuuusuF/RpBtJ//etfLmB6/PHHbfbs2bZ69WobMGBAzPYr4FixYoW99957bl8raNDB3aOlxrWftI0e89lnn7VHH3007DG0X/X3en//+9//2vPPP++yROvXr3fv14MPPujaqH0U+dwdO3a0JUuWuCDp8ssvd22IRt2KCuIUfC5cuNA916ZNm+yyyy7L9H4oa/Lpp5/a6NGjY75uAIgXunh8RAfIyPqJWFP1V6lSxf3cvXu3m/dFB3AFIbHoLPjpp592v8+aNcsdkHQWrqClTp067ufpp58e9W+VBRBlISK7VhQAPPnkk25iPD2ODqA66Pfo0cMORsOGDV2QIMo+6Dn0uGeffXambdUmBRHad177dMD/448/7KWXXnKBjugx2rdv7w7syhSI7nvuueeCXRbKoIiCHC9ToPqeK664wj1/y5Yt3W3du3fPMhBTwKh9rIyLHH300WH3K7Dw6D4938SJE10Q5NH7qgyKgpl69eq52iMFU++++64bMquMj16LgsPQ4PTSSy+1q6++2v0+ZMgQF3yNGDHCRo4cmamd2idq5/333x+8Tc+peYSUNVKWx3sPFCgmwt5NmzPdVqRihYS0BUD+IUDxER2AdKYdSmfHV155ZaZtdRavA5cCFGU4lBlQd4hqUaJR8KEDrboldPatgMULUHSw/eyzz8IOjtmlzIWCE4+6etS1crAUoITS427enPlAFYsyBo0aNQoGJ6LgQgd9HeS9AKVBgwZR6ylCnz9029DbsmqP3gdlMZSBOeecc1xX0imnnBK8/5VXXrEnnnjCZUnU7aR1o1RTFEqBi97j0OfUvg6dzyNaO1q0aJHpeqxRO8qyKMCJVlistnkBSuPGjWO+VgDICwQoPqKD6THHHBN2m9L50aguRKN6vCBBgYzqEWIFKDq4KoBRcKKLtlWAojNwdYGo/iH0AJpdmp0zsjtJQUAsOriqWypUtJqVnD5uboUGMLGe31sqIfK2rNqjmg7NWqpshzIYKl5WfY7qdebMmeO6XgYPHuyyNKVKlXLZk4cffjhmG7znjPd+UXDkZZUihdYoxdpPAJBXqEFJETqzVk1JLDqQqejzzTfftOXLl1urVq1clkAZGHX9qCsi1kHIyzDEY2VodRepTiRUXszJUbduXZcdUC2KR/UTXtdIftBr7dKli40bN84ee+wxe+aZZ9ztylZptNGdd97p9ru6TxTMxMvcuXMzXdf+iOakk05ynwdlaxQch14ISgAkEgFKklJaf+PGje7ApgLQl19+2S666KIs/0bdOiq21OgQpfR1sFbdyvjx42PWn4hGi2hEiFdAuWPHjly3WwWZKsZUbciqVatcnYlGkcSbMhSaI0YBgh5f3Rg33HCDG6HiddnkJRXQKhhUMa0CANUXeUGCAhLVqChrom4UdfVMnjw5bs+tz4PqSFRDov3rdQFGo6zOtm3bXI2NMmlqz/vvv29du3aNS0AKALlFgJKklAVQCl5nuhqyes0117hCyKwoCNFBJ3JIb+RtkVSAqoOoMi0ajnugQCgr6tLQKBPVu2jU0K+//mr//ve/Ld40GkkHWh189Tz/+Mc/XDeLikLzg7JO/fv3d1kqBYHKcCkgkQsvvND69OnjggYFi8qoaJ/Ei7qO9Fx6bgWCCkpVZBuN3k9llvQZUK2MugI1nFjdh6xdAyCRCgQiCwKSQEZGhuu315l8ZGGhRm5oSXXVaLBiKdKNuvKUjUnUEgR+HsXDdwPg7+N3JE6RAACA7xCgAAAA32GYMZBCkrDHFgCiIkABkNCaEmaFBRANXTwAAMB3CFAAAIDvEKAAAADfIUABAAC+Q5EsgINC0SuAvEAGBUnpqquuOuBsqR999JGbWXX79u2W6PZp+G/Pnj3ditJqU14skJgqtOyCptsHkN7SK4Myc2j+PdeZ/XN8QHvxxRdt6NCh9p///Cd4+5
QpU+ySSy7J0fwW+oL/+OOP3e/FihWzmjVrunVfrrvuOneb1l156KGHbOzYsW6xQS0EqAXsevToYVdffXXMg/2ZZ55pv/zyi1unJZRWwtUBxTuo6Lq3Oq/Wc9HifO3atbPhw4fbEUccYfHw+OOPh+0TvWata6NVg/0gsn1aaFH7W/tR70e5cuUS2j4A8DsyKD6i9UEefPBBFwQcLAUbGzZssBUrVthll13mVq3VonHeYnKPPvqoDRkyxN2vlX51dh/PTMM999zjnl+r9mq15FmzZtmNN94Yt8fXWg6RgZKfRLZPqwRrccdTTjnFKlWq5BZgzCkFPPv27YtzS1OPAvD9+/cnuhkA8jNAGTVqlFshVQv86NKiRQt77733whbj0oGwbNmyVqJECevYsaNt2rQp7DF0wDr//PPdarMVKlSw2267jS/d/znrrLPcwUtZlKx88sknduqpp7rMR9WqVd2Bf9euXWHbaP/qsXS2PmjQIJcheeutt9x9+qlsyqWXXuoWTmvUqJF1797dbr311ri9lsMPP9w9/1FHHeUyL126dLHPP/885vZ67gsuuCB4XZkQdYUo8+DRys3PPfdcpi4U/a6MkbIW+htdvvvuu+DfLVq0yJo0aeL2iQKEr7/+OkfdQuqOCX1MZUIUfGi15Lp167rP+rnnnusCMk9k+2644Qb32dfjKMMku3fvdu+d/h0oOG3VqpUtWLAgU1v0b6xx48YuG6b3XtkiPZ4yVspIKUP17LPPus9A165d3b7Xvgr9txnNyJEj3edCz63H0IrPHu13tUevU/+e9d4oyPJoX6htr776qp150YVW8ujq1qJtW/tmzRr3GrS/tV+UOduyZUvYful4VRcbMny4Va5Xz8oeU8uuvfZa27NnT8x2aj/dPmiQHX1CIytd42hr2e5ct2883vuhz7VWbdZ+0r4GkEYBSpUqVeyBBx5wX/gLFy601q1b20UXXWTLly9392sJ+bffftsmTZrkDhg//fSTdejQIezMRsGJvoy0xLy6NPTlMmDAgPi/siRUqFAhu//++23EiBG2fv36qNvoIKGDoYK/L7/80l555RV30FIXTlYUzHgHAQUOM2bMCDtw5KUff/zRfS6aNWsWc5vTTz/dvQ59RkSfH3WDeAciPYZeuw7OkRSYKFj2ska6KHDz3Hnnnfbwww+7z6wyF926dTvo1/Tbb7+5LquXX37ZZYd0QIwV4Kl9yijp34/a5gUh/fr1s9dff939O1DwpqCibdu2tm3btrC/V5ef/t2tXLnSnSCI/kb7Z/78+S5Y6dWrlws4FYDpsc455xzr3Lmza2c02hcKjtQuBWwKSE477bTg/Qp2+vbt67abPn2666pTV2NkZmLgwIHW/+Y+Nu+DaVa4cCH7d69e7nXpNc+ePdtWr16d6d/3zNmz7atV39i0N96wl0eNtjdee80G9Ovnim2jrVx80x39be6ihTZu9NO2aOZH1rH9he7fwKpVq8LeD2UfFcDq++iIwF/Fu7EeE0ASCBykI444IvDcc88Ftm/fHihSpEhg0qRJwftWrlypTvjAnDlz3PV33303ULBgwcDGjRuD24waNSpQsmTJwO7du7P9nDt27HCPq5+Rfv/998CKFSvcz0xm3J9/lxzq0qVL4KKLLnK/N2/ePNCtWzf3++TJk91r9XTv3j3Qs2fPsL+dPXu226/eaz799NMDN910k/t93759gZdfftk9xpNPPuluW758eaBu3brubxo0aBC45ppr3HuTlZkzZ7rHOOywwzJdChQoEHj00UeD21avXj1QtGhRd1/x4sXd3zVr1izwyy+/xHx83af2LFiwILB///5AmTJlAkOHDnV/J+PGjQscddRRUfdX5GuObPOHH34YvO3//u//3G1RPx8hfxPa1i+++MLdtnbtWnd9zJgx7vrq1auD2zz11FOBihUrxmyf9o/2i2fnzp3u38v48eODt+3ZsydQuXLlwLBhw8LaMmXKlLA26rW2atUqeF
3vsfZ1586dg7dt2LAh7N9epNdff939u8vIyAhkx5YtW9zjLV261F3XvtB1/dvfs3GTu7w8erS7bfr06cG/03t43HHHhe2XMkccEdj+7drg3z354LBAicMOC/zx0wZ3PfS9/P777wOFChUKfLd4SXB7Xdq0aRPo379/2PuxePHiv/dlyLa6HPC7AUC+yOr4HSnXNSg60504caI709LZq7Iqe/fudd0Unjp16li1atVszpw57rp+NmjQwKWTPTpjzMjICGZhYqV4tU3oJZXpTFBnyDpjjrRkyRKXdVL63LtoH+rMdu3atWHpe92nzIkyC8pu6SxblAZftmyZzZ0712UTNm/ebO3bt49ZIBtKZ8Xq8gi9VK5cOdN26rrTfcry6AxclD3zMiSRlKJXV5MyJkuXLrWiRYu6upgvvvjCdu7c6TIqyrLkhpd1ENWBiF7zwVB3Ua1atcIeNyePqWyQ/r20bNkyeFuRIkXs5JNPzvS+q7skq9ekzJu6YfRvy+P9G4vVprPPPtuqV6/uugCVaVGdUGi2RdmJK664wt2v7lyvWyqy6yS0HRXLlXc/I9sR2YaG9eq5/edp1qSJ7dy1y9b9+GOmduqzoM/M8ae0sCNq1ghe9HkI7XLS5yW0LQCSX44r9fSFoYBE9SY6AE6ePNkd8HQw0pdEZOGivqA2btzoftfP0ODEu9+7LxbVZKiwM10o1a6go3///q7PPpQO1tdcc03UglMFg55OnTq5rg0FKDp4KkUfStebNm3qLqplGDdunDtQ6W9UlxKL7ot8j6MVfKr7QV0WojoH1ZToc6OC3NAgNpS6bxSgqIZAwYiG5KrGQ10/OiDdcsstlhs68HtUNyGxiii9/RQ6AkeBRFaP6T1uXq0kfNhhh2Xr+XPyOlWnoq4g7e8PPvjAdcOoVkndT3p/FbAqgFFtiwJQPU79+vUz1YpEe87I2w6mYFWfdwVgcz+Y5n4Gn7fcX3VuHn3OvecHkKYBynHHHeeCkR07dthrr73mih+9Ia15RQdq9Yd7lEEJrTFIRao50LBZ7e9QJ510kht54x38sxpFcqBtQinIlMhi23jxDi6///57zG0UlLzwwgsu4FGNgRe0aPTRN998E7X+xKPgOFZ2JifKl/8rC6BaEW9IdF7MWaLsi9r86aefukDAC4QUIOTXHCDazwoWdVEtiQIT1SbpfVBdioITFWOLgsR4+XLFCvc5UFAh8xctshKHHWZVjzoq07Ynnniie1+3/PyztWrePHg7k8EBqS/HAYq+VL0Dn0YW6AtVBXH//Oc/3dmVRj+EnmFrFI+KMkU/VdQXyhvl420Tjc6odUknSpMrC/LEE0+E3X777bdb8+bNXVGsumR0dq2AZdq0afbkk09m67E1WkNdC96QV3UNKQisXbu265aLh19//dVlxZRVWLdunSuc1MFfz5lV5kh/984777gATRSUqL3KAql9sagLYt68eW50ic6slX3JDX22Ffwqm3Dfffe5wEgFtvGm901dbuoKU1uV/Ro2bJjrZtGIqrymffztt9+6fa5A7N1333WZDgXEuq4uo2eeecbtd3XrhM7Nc7D0PdGzbx9XXPv9unV2z0PDrFe3bpmyfKL3/IqOHa3bDb3twUGD7YT69e3nrVvt48VfuC4ddRsCSE0HPQ+KvtRUI6JgRaldr95AdBamLzel9kU/1UUU2ietA6v6uL0zePxNIywi0+P6UlbGSgdOnd3qDFPp+Wh1ILGo+0ijapTG1wFAWTAFJkr152Z+jmjUJh3c1C4NUdUBWY+vA18sOjAqMFMg4wVKOoBqHxyo/kQjaJSl0edIf5/bYab6DCtj89VXX7l9rXqge++91/KCgjCNxlLXmjJjGvGiocvxmswuKzqJeOONN9xIPHWjjR492r3u448/3gUKqi9TXZm6dVS/pIn94uXMU0+1Y2rUtDaXXGydrulpF7RtawNuvS3m9s899rh1uvQyu33QQKvfqqX9o+tV7sQotEsTQOopoE
rZ7G6ss2zNa6AvBp3pTpgwwX2B60tVRXc6I9SZmIo4FXRo+KNoSLEoVatuCx20dLaoM2x9OSsToOG12aUuHnVhqJtJzxNKtTHKCKhWQvM7AIif7AzZzar7RTVV2zZtstfHvpjtv4/2nAfq4om2PhDfDUDiZXX8jpSj02VlPv7973+7/nk9gc4wveBENDupzr50Vqisis7UNZrEozNcpZYVyCiborNqnb0rUwAAAJCrAOX555/P8n6dlTz11FPuEosKApVlAQAAiCW9FgsEkFDq/mVmVwDZwWKBAADAdwhQAACA7xCgAAAA3yFAAQAAvkORLIC4oggWQDyQQQEAAL5DgIKotDLslClTstxGs4JefPHF+dYmrbejVZHThVYa1vug9a0AIN2kVRfPyMV/z2qb16474bocba+DvQ5EkUGBDlJnnnmm/fLLL279FO966OR4NWvWtJtuusl69uwZt/aHruarBfg0PfgXX3zhlipIFK2/otmHAQCpL60ClFSihRi1joGWrdfCf1o+oFatWtamTZu4PH5Wq0vnN61+q1W0tQhgPB4nnaTjawaQGujiSVIVKlRwQYQyGzfeeKP7+fnnn0fdVutB6uD+2muvBW9TJkSrDXs++eQTK1asmP3222+Zunj02KKVk3X7GWecEfb4w4cPd4+llYqvv/5627t3b8x2Dxo0yD33008/bVWrVrVDDz3ULrvsMrdwVGTX0X333ecWljzuuOOidvFoxeKLLrrISpQo4YI1Pc6mTZsyPddzzz2X5QJx33//vVvZWRkjZWi0oq+3HIMWuOzevbv7+0MOOcS15fHHHw/7e6+9WvCyYsWKLtOl9aX27dtnt912m5UpU8aqVKliY8aMCf6NslLal1o1+JRTTnFt08rBWqk6K3qftIq12qL9p/d+165dwfu1j4YMGeLWzNI+iWdWDQDyEwFKklPwMXXqVHewbtasWdRtdCA87bTTXPeQqLto5cqVLvvy1Vdfudt0YGzatKkLGCLNnz/f/fzwww9d188bb7wRvG/mzJm2Zs0a9/PFF190U5nrkpXVq1fbq6++6jI/aru6jq67LrxLbPr06S5LNG3aNLfAZKT9+/e74GTbtm2u7dru22+/tX/+85+Znuv11193bV68eHHU9iio0uKWs2bNsqVLl7oVuhX0eM+j4GLSpEm2YsUKGzBggN1xxx2u/aFmzJhhP/30k3uMRx55xAYOHGgXXHCBC3rmzZtn1157rV1zzTW2fv36sL9TAHPLLbe4faAFNBUobd26NWo7tZ/PPfdctxjnl19+aa+88ooLWHr37p0pYGzUqJF7zLvvvjvL9wIA/IouHh/Rgdg7MHp0Bh+NDpqiA6sOojpjVxASi7IeylqIDqLKhigDo6ClTp067ufpp58e9W+9rhVlSCK7fnQAfvLJJ91K1Xqc888/3wUXPXr0iNkWLXv/0ksv2VFHHeWujxgxwv3dww8/HHx8ZTKU+YjVPaHnUDCxdu1al0kQPaayH6pVUbDldXHo9qy6hxTc6aDfoEEDd101PZ4iRYrY4MGDg9eVSZkzZ44LUJSx8ShL8sQTT7jVvJVlGTZsmMtGKZiR/v372wMPPOACissvvzz4dwou9NwyatQoF7BpUc5+/fplaufQoUOtU6dOdvPNN7vrxx57rHtOvW/6Wy9D1Lp1axf05GZIcJGKFbL1dwCQ18ig+IiKX3WWH3rRQTqa2bNnh22j7gUdpGLRQUwZgC1btriMgwIWXRSYqEvms88+y9R1kx0KCBSceNTVs3lz1vNgVKtWLRiciDIHCrKUMfEoWMiqdkIZIAUmXnAi9erVc90rui909ewD1a6om+Tee++1li1busyHshOhtDp348aN3eMogHzmmWdcUBO5HxSceNTV4wU8on2kAC9y3+i1ewoXLmxNmjQJa3+oJUuWuOyU2uBd2rZt6/adAjWPHgMAkh0Bio8oa3DMMceEXUIP5KF0Jq/7dWDs2rWrde7c2dVsxK
KDpc7yFZyEBij6XRkHBSmqhcgpZRgiu5N0wDxY8Rqtk53Hufrqq133kPahsjI6wCurI6oRufXWW10dygcffOACQu1vZWYOtB/ivW927tzpuolCA1gFLatWrXIF0jl5zQDgdwQoKUJn6KopiUUHRxVXvvnmm7Z8+XJr1aqVNWzY0HURqetHB+VYBzYvkxGruymnlH1QvYZn7ty5wa6R7Kpbt66tW7fOXTzKEGmotjIpOaVMjOpEVKui7pFnn33W3f7pp5+6wE01MuoWU1CoWpB40Wv3qKh20aJF7rVFc9JJJ7nXGBnE6sJInQN3Ze39eav9mfGrbRs3LtHNAZANBChJSl0FGzdudCNQVMD58ssvu6LRrChj8t///teNbFH3gIIC1a2MHz8+Zv2JN2JIo0ZUH6FRMqEjbnJDtRJdunRxZ//qqlIXi+o5cjK0+ayzznJZIdVkaPSSCnk1ckWvI6ddHKrpeP/99103iR5LBb9ekKA6j4ULF7r7v/nmG1d0qoxTvKj7aPLkya5YWcW6KmDu1q1b1G1vv/121xWnuhVlT5Q5UcAZWSQLAKmAACVJKdugeg+dPevApdS/1y0Riw7eyoKE1pro98jbIqk2QsWYyrRo2O+BAqEDUZs7dOhg5513np1zzjkukzNyZM4m0VNGSAdnFekqyFLAouJWjWzJKb1+BQcKSjRKpnbt2sH2aL+qrRodpFFSGmETOeLoYKhwVheNulEB7VtvvWXlypWLuq32k7rkFCgpG6aMjkYV6T0BgFRTIKBxqkkmIyPDSpUq5c7kNddD5AgRnQlnNe8FEkdzk2h+lVhDftOFX2bnzekonvxYCDCyDdGeMzft/GPvXvv+xx+tzNIvrfLVV8ehpQDiefyORAYFAAD4DgEKAADwHQIU5HsXT7p373hT0qt3NZHdOwDgZwQoAADAdwhQAACA77AWD4CkwxpCQOojQAHgK/kxlBmA/9HFAwAAfIcABQAA+A4BCpLSVVddZRdffHGW23z00UduSnwtIJhfQ6jTbdjwsU2a2BPPPJ3oZgBIQWlVg7JlxJP59lzlb+id4wPuiy++aEOHDrX//Oc/wds1Lfwll1zi5szILq2rozVbpFixYm6NGi0o560ho7VnHnroIRs7dqxbbFALAWpRvB49etjVMaYA18H+zDPPdIvZlS5dOtOcHlpwTxfvuh5XtCBhxYoVrV27djZ8+HC3dk48PP7442H7RK9ZwcFjjz1miXLrrbfaDTfckLDnB4BUQgbFR7R20IMPPuiCgIOlYGPDhg22YsUKt1KwFsPTSsYyePBge/TRR23IkCHufq3e27Nnz7hmGu655x73/D/88INbLXnWrFlu1eJ40VoOkYFSoihQ2rdvn1shumzZsgf1WHv37rV0smfPnkQ3AYBPEaD4iFbkrVSpksuiZEWr3mo1W2U+qlat6g78u3btCtvm0EMPdY+l7Im6HpQh0Uq5op/Kplx66aVuwTqtpNu9e3eXAYiXww8/3D3/UUcd5TIvXbp0sc8//zzm9nruCy64IHhdmRB1z0ydOjVsFeTnnnsuUxePflfGSFkV/Y0uWozPs2jRImvSpInbJ6eccop9/fXXMduhv9PfT5w40W2roLF+/frBjFRo19F7771njRs3dlkqvSeRXTz79+93gVqVKlXcNrov9PV4z6UVmLXStJ5LwVy0AEiPXa1aNfc4Wr04NNh7+eWX3evz9vm//vUv27x5c6b2vv/++24FZH1uWrdu7baZOn26NTi1lZU9ppZ17nWt/fbbb2FZKWXedFFAqFWWBz74QJbZvO07dtg1fftY5Xr13GOe07GDLVm+PHj/PQ89ZE3atLYXxo+z2k2b2OHVq8V8LADpjQDFRwoVKmT333+/jRgxwtavXx91mzVr1ti5555rHTt2tC+//NId3HRw1EEkKzooeWerOojNmDHDtmzZYvnhxx9/tLffftuaNWsWcxsdoPU61P0kCgh0QNTB1XsMvXYdNCMpMG
nRokUwa6SLAjfPnXfeaQ8//LAtXLjQChcubN26dTtgm2+77Ta75ZZb3GrDeuz27dvb1q1bw7ZRV9wDDzxgK1eutIYNG0Ztl55XXVt6r9q2bWsXXnihrVq1KtPj3HTTTe5xtE2k119/3WW8nn76afe36vZr0KBBWNZF2bAlS5a4+xT4KGiLpCDnySeftM8++8zWrVvnMmsjnn3GXho5yt4cN94+/Phj99kLpW5H7bP58+e71/P46NEuuIjlih5X2+aff7a3J0ywuR9MsxMbNLRzL/2HbQvJCq5Zu9Ymv/N/9soLY2zB9OkxHwtAeiNA8RnVm+hMe+DAgVHvV3alU6dOrt5DWRGd5T/xxBP20ksv2R9//JFpex3wx40b5w6QOmuWRx55xAUnClR0YL322mtdNiA7lA1QV0boRd04kW6//XZ3nwIj/Y3O4PW8sSgj9Ouvv7qAQGfo6hJSgOAFKPqpbIyyKJF0dl+0aNFg1kgXBXue++67zwVA9erVc8GADtDR9lUoBXwKAuvWrWujRo1yz/H888+HbaPsyNlnn221atWyMmXKZHoMBSbaD5dffrkdd9xxrvsuWp2M3ssOHTq4bNaRRx6Z6XG0f/WalGFTFuXkk092wZhHAZdqfJQta968ufs86P3cuXNn2OPce++91rJlS5dFUcZMQeCIBx+0Exs0sFbNm1uHCy5w3X2hFOgpOFL79bm7rnt3e/zpZ6Lus0/nzbMFX3xhE599zhqfcIIdW7OmPThokJUuWdLeeOft4HZ79u61F0aMcM/bsN7xWb4PANIXAYoP6UCmM1edUUfSWbKKW0MDBJ11qzth7dq1we1GjhwZDBB0MOvTp4/16tXL3acD9bJly2zu3Lnu4KZUvzIEsQpkQ82ePdst9hd6UZdDtAyE7lNgNP1/Z8nnn39+MEMSSfUk6mpSILJ06VIXcKguRgGLDrQ6mCrIyI3Q7IYXAIR2gUSjrIlHGQR1oUS+H7otloyMDPvpp59cQBBK13PyOKKuuN9//90FIHovJ0+e7GpeQruw9P4peFE3j7efIgPH0P2gwmUFdDWrHx28rUK58pn2y8mNGtm+zVvc5Gm6NG/SxFav/Tbq+/jl8uW2c9cuq1S3jh1Rs0bwsvaHH2zNd38VTUv1KlWsfLlyWb5mAEirUTzJ4rTTTnNBR//+/TOl6nWwvuaaa6IWnOoA5dHZrro2FKDooKzRNKF0vWnTpu6iM3hlWTp37uz+Rmfysei+yOJUHcAjqXvGy3Yo06OsgQ76OkNXJiAadd8oQFGdhQ6yykoog6GuHwUoyqjkRpEiRYK/K5MjCugO1mGHHXbQj5Gdx1EWQ3UzH374oU2bNs3VD2kUlvaJuu30WdFF9Svly5d3gYmuRxagRu6H0OvebQezXxScHFmxok17Y3Km+5RF8SgwAoADIUDxKdU2qDtAqfVQJ510kht5E62rI5S6JA60TShlVSSy2DZevC4XZQJiUVDywgsvuIBHdTZe0KLRR998803U+hOPMi6xsjO5oeySAkVRtkJZigPV+YQqWbKkyyx9+umnYZkfXVcXTU4p0FSWRBeNyKpTp47LNKk7TLUx+rx4dTeqtYmX+Z9/EXZ93qJFdkyNmmFdaJ4TGza0jZs3W+FChezokGAZAHKDAMWnVASpLIjqCUKppkF1BjpYqktGZ98KWHRmrQLI7PjHP/7huhpUv6LaBnUNKVtTu3Ztd+CLB9WTbNy40R1AVZDZr18/d3av54xFAYH+7p133nEHXFFQovYqC6T2xaK5V+bNm+cKRNW1Fa0mJCeeeuopl/lRBkc1GBr6nZ3i2shuLtUSqUZFweaYMWNct1e0kTpZUZeegi8VGSv7oGyXApbq1au7jIeCMxW3qpZIXXcqmI2XdT+ut9sGDrCrO//bvlj6pY18/nkbNmhw1G3bnHaa6wL6R9erbOjdA1wNyoZNm+zdD6fZxe3Oc3UpAJBd1KD4mIowI1PuqiNQal8ZBRWWquBxwI
ABUetAYlH6X6NqdDaug76GACsw+eCDD6J21+SG2qSgQu3S8GEFUnr8rOYJ0SRuCswUyHiBkoIW7YMD1Z9omLLO6pUJ8ro5DoYCJF1UF6MuJg3NVrdVTqgbrm/fvq5rSq9LQ4z1OAp8ckJdas8++6wLKvX+q6tH75/2pV6rAphJkya51642qzg3Xq689DL7/fc/rGW7c+2m/v2ttybz69w56rbqInpr/AQ7tXlz63HzTXZ8y1PsymuvsR/Wr7cK5cvHrU0A0kOBQE6mKPUJFSCqC2PHjh0ulR5KozOUEVCthOaVAHJCGRh9dlScmw7T1keuHFykYoXg78peNaxd2x4ecq/5XWi7Y62I/Mfevfb9jz9amaVfWuWIgvDIWaZzOhM0gIM/fkeiiwcAsrEsBkELkL/o4gEAAL5DBgWIKLZNwl7PPKEh39G6SgAgPxCgAEg7+bmyOYB86OLRNOua2EuzVVaoUMEt1ha58JoK67wF27yLhj+G0ggLzSqqIZN6HA3HDJ0ZEwAApLccZVA0vFWTRClIUUBxxx132DnnnOPm4QidDVPTcWuIbLSZIzWfg4ITzb+hNVG0sNu///1vN6ulFsqLF9L0AEIFTN8JAfcfgBQLUEKXihfNv6AMiGbZ9GbdFG/Rtmg0F4YCGs3loPVANJRTE0tpAjKttqpJpw6GN323lo3XZFYAIH/s2as1DqxQxBIAAFKwBkXjmCVy1k7NlKnZLhWkaDKwu+++O5hFmTNnjpu0SsFJ6MRhWshu+fLlbuKxSLt373aX0HHUsWiyLk1s5S16puf11l8BEG7v3r1h1/+MWOU58n6/yqrdypwoONmydasV37DRCsZhHSYAPg5QNLunFpnT7Jb169cP3v6vf/3LTcGtGUS1kq0yI6pTeeONN9z9mv48NDgR77rui1X7Mnhw9Om1o/GyNwdasRZId39m/Bp2vdCvGVne71dZtzvgMicKTkr++GO+tw1APgcoqkXRuh+aBjxUz549g78rU6Lpztu0aWNr1qxxa5LkhtaJ0ZThoRkUb2G0aJQx0fOq+ylZzgCBRNg2blzY9TJXXpnl/X6VZbsD5rp1yJwAaRCgaKE6Leg2a9Ysq1KlSpbbaoEzWb16tQtQlN2YP39+2DabNm1yP2PVrRQrVsxdckrdPdFWXQXwlyK/h3eNRC4PEXm/XyVruwHEaZixRsYoOJk8ebLNmDHDrVlyIFq9VZTRkBYtWrhl4kO7X7QSr+bk12JnAAAAhXParTNhwgR788033VwoXs2IFv7RiBl14+j+8847z620qhqUPn36uBE+WoVVNCxZgUjnzp1t2LBh7jHuuusu99i5yZIAAIA0z6CMGjXKjdzRZGzKiHiXV155xd2vIcIaPqwgpE6dOm6Z+Y4dO7ql4T3qclH3kH4qm3LllVe6eVBC500BAADprXA8Jz9T4aomczsQjfJ59913c/LUAAAgjbCaMQAA8B0CFAAA4DsEKAAAwHcIUAAAQGqtxQMA6WrLiCfDrpe/oXfC2gKkIjIoAADAdwhQAACA7xCgAAAA3yFAAQAAvkOAAgAAfIdRPEAKY6QJgGRFBgUAAPgOGRQAyEU2CkDeIoMCAAB8hwAFAAD4DgEKAADwHWpQgBSqiWCUDoBUQQYFAAD4DgEKAADwHbp4gBTCUFgAqYIMCgAA8B0CFAAA4DsEKAAAwHeoQQGAOGBhRiC+yKAAAADfIUABAAC+Q4ACAAB8hxoUII0wTwqAZEEGBQAA+A4BCgAA8B0CFAAA4DsEKAAAwHcIUAAAgO8QoAAAAN8hQAEAAL5DgAIAAHyHidoAIA+weCBwcMigAAAA3yFAAQAAvkOAAgAAfIcABQAA+A4BCgAA8B1G8QBJOioEAFIZGRQAAJDcAcrQoUOtadOmdvjhh1uFChXs4osvtq+//jpsmz/++MOuv/56K1u2rJUoUcI6duxomzZtCtvmhx9+sPPPP98OPfRQ9zi33Xab7du3Lz6vCAAApFeA8v
HHH7vgY+7cuTZt2jTbu3evnXPOObZr167gNn369LG3337bJk2a5Lb/6aefrEOHDsH7//zzTxec7Nmzxz777DN78cUXbezYsTZgwID4vjIAAJAeNShTp04Nu67AQhmQRYsW2WmnnWY7duyw559/3iZMmGCtW7d224wZM8bq1q3rgprmzZvbBx98YCtWrLAPP/zQKlasaCeccIINGTLEbr/9dhs0aJAVLVo0vq8QAACkVw2KAhIpU6aM+6lARVmVs846K7hNnTp1rFq1ajZnzhx3XT8bNGjgghNP27ZtLSMjw5YvXx71eXbv3u3uD70AAIDUlesAZf/+/XbzzTdby5YtrX79+u62jRs3ugxI6dKlw7ZVMKL7vG1CgxPvfu++WLUvpUqVCl6qVq2a22YDAIBUDlBUi7Js2TKbOHGi5bX+/fu7bI13WbduXZ4/JwAASLJ5UHr37m3vvPOOzZo1y6pUqRK8vVKlSq74dfv27WFZFI3i0X3eNvPnzw97PG+Uj7dNpGLFirkLAABIDznKoAQCARecTJ482WbMmGE1atQIu79x48ZWpEgRmz59evA2DUPWsOIWLVq46/q5dOlS27x5c3AbjQgqWbKk1atX7+BfEQAASK8Mirp1NELnzTffdHOheDUjqgs55JBD3M/u3btb3759XeGsgo4bbrjBBSUawSMalqxApHPnzjZs2DD3GHfddZd7bLIkAAAgxwHKqFGj3M8zzjgj7HYNJb7qqqvc748++qgVLFjQTdCm0TcaoTNy5MjgtoUKFXLdQ7169XKBy2GHHWZdunSxe+65h3cEAADkPEBRF8+BFC9e3J566il3iaV69er27rvv5uSpAQBAGmEtHgAA4DsEKAAAIDWGGQNAXlqwcUGm25pWapqQtgBIDDIoAADAdwhQAACA7xCgAAAA3yFAAQAAvkORLADEAYW9QHwRoABAPtgy4smw6+Vv6J2wtgDJgC4eAADgOwQoAADAdwhQAACA71CDAiApi1ApQAVSGxkUAADgO2RQAMQNQ20BxAsZFAAA4DsEKAAAwHcIUAAAgO8QoAAAAN8hQAEAAL7DKB4giTE3CIBURQYFAAD4DgEKAADwHQIUAADgOwQoAADAdwhQAACA7xCgAAAA32GYMQDEaWFEAPFDBgUAAPgOAQoAAPAdungA5ClmuwWQG2RQAACA75BBAXxqy4gnE90EAEgYMigAAMB3CFAAAIDv0MUDAD7t0it/Q++EtAXwAwIUADgAJmUD8h9dPAAAwHfIoABAknYD0QWEVEaAAiDh3SVM3gYgEgEKkOIICAAkIwIUAMEg5rvFI4O3XXfCdQlsEYB0R4AC5BPqB2JL51EyI/8XFB79v31Adgv4CwEKgJgHTu+g6ccDZ2hQ42V+cpP1oQsMSJFhxrNmzbL27dtb5cqVrUCBAjZlypSw+6+66ip3e+jl3HPPDdtm27Zt1qlTJytZsqSVLl3aunfvbjt37jz4VwMAANIzg7Jr1y5r1KiRdevWzTp06BB1GwUkY8aMCV4vVqxY2P0KTjZs2GDTpk2zvXv3WteuXa1nz542YcKE3LwGIC2kczdIOolXZghIuwClXbt27pIVBSSVKlWKet/KlStt6tSptmDBAmvSpIm7bcSIEXbeeefZ8OHDXWYGSEeRNSrJEJD4vY1+b9+BHD1pnvu5Zfb+RDcFSI0alI8++sgqVKhgRxxxhLVu3druvfdeK1u2rLtvzpw5rlvHC07krLPOsoIFC9q8efPskksuyfR4u3fvdhdPRkZGXjQbQAof7AGkeYCi7h11/dSoUcPWrFljd9xxh8u4KDApVKiQbdy40QUvYY0oXNjKlCnj7otm6NChNnjw4Hg3FQDyNaij+BZIYIBy+eWXB39v0KCBNWzY0GrVquWyKm3atMnVY/bv39/69u0blkGpWrVqXNoLpNJKtGQ5AKSKPB9mXLNmTStXrpytXr3aBSiqTdm8eXPYNvv27XMje2LVraimJbLQFkjFoA
YAkE8Byvr1623r1q125JFHuustWrSw7du326JFi6xx48buthkzZtj+/futWbNmed0cAPDVCB0AcQpQNF+JsiGetWvX2uLFi10NiS6qFenYsaPLhqgGpV+/fnbMMcdY27Zt3fZ169Z1dSo9evSw0aNHu2HGvXv3dl1DjOABAAC5mqht4cKFduKJJ7qLqDZEvw8YMMAVwX755Zd24YUXWu3atd0EbMqSzJ49O6yLZvz48VanTh3X5aPhxa1atbJnnnmGdwQAAOQug3LGGWdYIBCIef/7779/wMdQpoVJ2QAAQNwyKAAAAHmNAAUAAPgOqxkDaYj5UgD4HRkUAADgO2RQAMDnmDIf6YgMCgAA8B0yKEAOjIyY/fO6E65LWFsAIJURoABABIqIgcQjQAF8gANifDNcR7M/gaRHDQoAAPAdMigAkAKoj0KqIYMCAAB8hwAFAAD4DgEKAADwHWpQgARg1A4AZI0ABQCQ3GYODb9+Zv9EtQRxRIACAElqy4gng79r7pfvLm2W0PYA8UQNCgAA8B0yKEhLkXNGCPNGAMynAv8ggwIAAHyHDAoQ57VfmlZqmsAWAUBqIIMCAAB8hwwKkIbW//Jb2PUqRxyasLYAQDQEKEAuHD1pXtiBfn3IfU0rHbgbCACQNbp4AACA7xCgAAAA36GLJw89Ou2bsOt9zq6dsLYAAJBMCFCA/2GCKiB/pc1JXORaQcJ6QQdEgAIAKSgZZktu/sMzEbcMT1BL4EcEKOmEFT8P3trZf/3cvuF/N5RPZGuQCwyxTsFsRJykTUYnSRCgJAMCCyBfEcRkje5Q5AcClGQMCOjPzBedXx+S6bYWjHtLeiV3e9kvTy1LNguYUyePMjEdLVEiszeS7hkcAhSkd1dNqBqnxvXg8V2UGgAgFWpZUqVLB/5GgJIi4tV3mox9sMnYZiAvuqXmrNkavN6iVllLNlGzCByl0hZvfYqgGt7nZ4FAmosWfDRPSEuQLOhRBwAAvkMGBSmHbAkAJD8CFCCGKhmLMt227n8/y/yx769fiuVvm1JxCK8wjDef+H2EIhJnpv9GhxKgRDHn278LzaTFmck3ZC1VPvz5WgAbbWRPnkzuZmalq+fNcyHntn8f5cbyefPYefy+nzr9y+DvVT4/1L67tFmePh9yd0zJ9nElzQcOEKBkw5znb810W4vuw9P3jCYZ2wz4lDfypvD/Mktkk5L/+y8yQKAYOHcIUPwmjcf8R6vyj5fQ4Zd5PeGa14XhPWeiJ3eL1qWCJBenrM/IyVfk2XxA6SzzqErkBgEKkkpeBjEAAP8gQMnDPkarZslfW5MEZyZzq/XM8eOs2/57ptuqlj7E8gtrvSA/HD1pXtj1OTUjsixl8rc98JGZ/s/W5zj5PGvWLGvfvr1VrlzZChQoYFOmTAm7PxAI2IABA+zII4+0Qw45xM466yxbtWpV2Dbbtm2zTp06WcmSJa106dLWvXt327lz58G/GiQ1BR+RF2Qj1R96Qf5J1X2vou7QS263AfI7g7Jr1y5r1KiRdevWzTp06JDp/mHDhtkTTzxhL774otWoUcPuvvtua9u2ra1YscKKFy/utlFwsmHDBps2bZrt3bvXunbtaj179rQJEyYc7OtBnDH19F8affJzopuQtPKqBib0cYP1Pkk4vXuuMm4Fcvc4bxVcHXa9quVf1hD+lt8jjbIjx4eadu3auUs0yp489thjdtddd9lFF13kbnvppZesYsWKLtNy+eWX28qVK23q1Km2YMECa9KkidtmxIgRdt5559nw4cNdZgZIJV53UnDuFGH+lJyLyFKU3P33/qySsfd/v51jvprfJZeBhJ9EFpj7ofA7bcxM7xGTcf2YrV271jZu3Oi6dTylSpWyZs2a2Zw5c9x1/VS3jheciLYvWLCgzZsX3l8KAADSU1yT9QpORBmTULru3aefFSpUCG9E4cJWpkyZ4DaRdu/e7S6ejIyMeDY7fcVpcrnOrw/JdNtJJf9p6d4tlEyzq0
YOjY7WXZLoNuankrtDJtVzaiWoJam05MTfJ66I9v0bv4k9m6fI4rFJcdgYOnSoDR482JJeElRNJ6PIf4wjM0olrC048ME+o9iRlmyy032TOagxs+L+/oqN12i2aLVqxSIGDG1e3y/KXx6T4+cauf3v2XI915VuaHmBGrzEiuuurlSpkvu5adMmN4rHo+snnHBCcJvNmzeH/d2+ffvcyB7v7yP179/f+vbtG5ZBqVq1ajybDoQ5+sMN4TUjKV6Q6h1c/67lSGw9R65HxUQuK5DOSwpE3YelE9CQ1C4mnbsvcxDDzLE+DFA0akdBxvTp04MBiYIJ1Zb06tXLXW/RooVt377dFi1aZI0bN3a3zZgxw/bv3+9qVaIpVqyYuyDFkFHKmVQayuqjfZaRhIFodkR7XZGj0Za0Kmd+EnVqgZqpOTIrL7M8zS1NAxTNV7J69eqwwtjFixe7GpJq1arZzTffbPfee68de+yxwWHGGplz8cUXu+3r1q1r5557rvXo0cNGjx7thhn37t3bjfBJ9RE8UYdx8Y8PsboHIuoeoh1wSvq8CyG3rzUtAqa1e/73S73Y20QZseQk4fuOJJhc1Gdy/ClfuHChnXnm34Ojva6XLl262NixY61fv35urhTNa6JMSatWrdywYm8OFBk/frwLStq0aeNG73Ts2NHNnYLUmf0205lQLgOxzzNeCX/cXLanSsaiHPXHJ2P3DvJXdjIvWW0T/LyVtKQXLfPxRfmaCWkLUkeOA5QzzjjDzXcSi2aXveeee9wlFmVbmJQtxvTzKZpRSdXsUbRsQLyKQCPrSVLgOIYkFDW4jyikjZwATqpazbz57igTn66QZCx2fTTN1iJLwrfIH7KznHZeTdWe6NRcpucvzaiZAxecIplF75aCXyXDgTzT96jP1m7zAwIUn0l0piEyqMrNQny5Fe0szOyvQmoAiRt67HeZhh6XiN+cIkgcApRUxQgZ5EM3VHaGOdM1lb0ulJK7f06aAthsBTH5uIhgZHsqlIjP47JgaWL5/18CcMAsSySyLvklU51MHnaFZKcoNXLdo2Qc5ZTrfcFMDEmNIdaZseQTAADwndQ8vUBy1tsQLvuLUvTejKxOxNzl8BUKeRMjP7uBmuf2uZK0y58AJUUleqQPssbBBH7l99lmkT4IUIAUCmIIfOAXiRwNlK1MQ5rXdyQDApRcSpfq7nR5nfDXaCAgmWfSRnwQoABIPBZCRArP04LcIUBBysnOujv0s6eHVF2pGHkjVTPGc5K0JpEABQB8hKAK+AsBShJI1ugXAA52IsYL9x+TkLYg8QhQkEddKnyppEJfvzcjKwDkNwIU5AvOjAAAOcHcnQAAwHfIoABpiAndkDyLf6avOWlef0iAAiDXCHSQ1whi0hddPAAAwHfIoABJjDkzAKQqApQ0xsga+DXQKlmcryYg3fEtgKSfth4AkHoIUJAnKGzDwaDrCgABChKGIAYAEAsBCgCkqchVvdPphIh6O/8jQEGW0qWQNp2+qAEgGTAPCgAA8B0CFAAA4DsEKAAAwHcIUAAAgO9QJAsAyHYB+ZJW5SxdpMsgAb8igwIAAHyHDArSYvI0hhEDSNfvv2RFgJKiSE0CAJIZXTwAAMB3CFAAAIDv0MUDAEg71Jv4HxkUAADgOwQoAADAdwhQAACA71CDgpTDnCcAkPzIoAAAAN8hg4Ico/odAJDXCFAAALmWzosJIsm6eAYNGmQFChQIu9SpUyd4/x9//GHXX3+9lS1b1kqUKGEdO3a0TZs2xbsZAADkSQY59IIky6Acf/zx9uGHH/79JIX/fpo+ffrY//3f/9mkSZOsVKlS1rt3b+vQoYN9+umnedEUAECayc/AgXXPkixAUUBSqVKlTLfv2LHDnn/+eZswYYK1bt3a3TZmzBirW7euzZ0715o3b54XzQEAAEkmT0bxrFq1yipXrmw1a9a0Tp062Q8//OBuX7Roke3du9fOOuus4Lbq/qlWrZrNmTMn5uPt3r
3bMjIywi4AACB1xT2D0qxZMxs7dqwdd9xxtmHDBhs8eLCdeuqptmzZMtu4caMVLVrUSpcuHfY3FStWdPfFMnToUPc4AACkIrqK8iFAadeuXfD3hg0buoClevXq9uqrr9ohhxySq8fs37+/9e3bN3hdGZSqVavGpb0AACANhxkrW1K7dm1bvXq1nX322bZnzx7bvn17WBZFo3ii1ax4ihUr5i6IjWpyAEAqyfOZZHfu3Glr1qyxI4880ho3bmxFihSx6dOnB+//+uuvXY1KixYt8ropAAAgXTMot956q7Vv39516/z00082cOBAK1SokF1xxRVuWHH37t1dd02ZMmWsZMmSdsMNN7jghBE82c+OpHu/JIDUWisr1Sd3o77EJwHK+vXrXTCydetWK1++vLVq1coNIdbv8uijj1rBggXdBG0andO2bVsbOXJkvJuBFMZigEDy4N8rfBOgTJw4Mcv7ixcvbk899ZS7AACQjqgbPDBWMwYAAL7DYoEAgIRiwUFEQ4CShGk+UoMAgFRHgIKE4+wJSB0UxSJeCFAAAPCht9J8igmKZAEAgO+QQQEAII6oE4wPMigAAMB3yKCkUf8hUT0AIFmQQQEAAL5DgAIAAHyHLh4AAJLUWym8UjIZFAAA4DtkUHKJglMAAPIOGRQAAOA7BCgAAMB36OJJgvlL0g2LBwIAyKAAAADfIYOSjyis/QvLsQMADoQABXFF9wyAg8X3SHTpdpJLgAIAQAp5K0XqKKlBAQAAvkOAAgAAfIcABQAA+A41KNmQboVJAAAkGgEKAMDXGNWT9yfZfiykJUABAKTUXEoEMKmBAAV5iknZAAC5QZEsAADwHQIUAADgOwQoAADAd6hBAQAgzb0VZaRPC0ssAhTkCMP9AAD5gQAFB4VROgCAvECAAgBIKWR6UwMBCgAg7RHU+A+jeAAAgO+QQUlh8TgjoMYEQCriu83/CFDSWLR/oKQ1AQB+QBcPAADwHTIoCEPaEwDgBwQoAAAc5Mka3ePxRxcPAADwHTIoAAAcJAYdpFiA8tRTT9lDDz1kGzdutEaNGtmIESPs5JNPtmTkh0l+qB8BAP+gmyhJA5RXXnnF+vbta6NHj7ZmzZrZY489Zm3btrWvv/7aKlSoYKkuN8EEH14ASF1+ONH1k4TVoDzyyCPWo0cP69q1q9WrV88FKoceeqi98MILiWoSAABI5wzKnj17bNGiRda/f//gbQULFrSzzjrL5syZk2n73bt3u4tnx44d7mdGRkaetG/373sPuE39OVvDrv+Ww8f4bc++HLfr2Bkbs37MA/x9ZJty04ZEoN35JxnbnKztTsY2J2u7o30fJ0O7j434zl/Womy+Pn9eHGO9xwwEAgfeOJAAP/74o1oW+Oyzz8Juv+222wInn3xypu0HDhzotufChQsXLly4WNJf1q1bd8BYISlG8SjTonoVz/79+23btm1WtmxZK1CgwEFFclWrVrV169ZZyZIl49RaRMO+zj/s6/zDvs4/7OvU2N/KnPz6669WuXLlA26bkAClXLlyVqhQIdu0aVPY7bpeqVKlTNsXK1bMXUKVLl06bu3RzucDnz/Y1/mHfZ1/2Nf5h32d/Pu7VKlS/i2SLVq0qDVu3NimT58elhXR9RYtWiSiSQAAwEcS1sWjLpsuXbpYkyZN3NwnGma8a9cuN6oHAACkt4QFKP/85z9ty5YtNmDAADdR2wknnGBTp061ihUr5lsb1G00cODATN1HiD/2df5hX+cf9nX+YV+n3/4uoErZhD07AABAFCwWCAAAfIcABQAA+A4BCgAA8B0CFAAA4DtpHaA89dRTdvTRR1vx4sXdisrz589PdJOS2tChQ61p06Z2+OGHuxWpL774Yrc6dag//vjDrr/+ejcLcIkSJaxjx46ZJuxDzj3wwANuVuWbb745eBv7Or5+/PFHu/LKK93+POSQQ6xBgwa2cOHC4P0ab6BRiUceeaS7X2uLrVq1KqFtTkZ//vmn3X333V
ajRg23H2vVqmVDhgwJW7uFfZ07s2bNsvbt27tZXPV9MWXKlLD7s7NfNYt7p06d3ORtmjC1e/futnPnTssTgTQ1ceLEQNGiRQMvvPBCYPny5YEePXoESpcuHdi0aVOim5a02rZtGxgzZkxg2bJlgcWLFwfOO++8QLVq1QI7d+4MbnPttdcGqlatGpg+fXpg4cKFgebNmwdOOeWUhLY72c2fPz9w9NFHBxo2bBi46aabgrezr+Nn27ZtgerVqweuuuqqwLx58wLffvtt4P333w+sXr06uM0DDzwQKFWqVGDKlCmBJUuWBC688MJAjRo1Ar///ntC255s7rvvvkDZsmUD77zzTmDt2rWBSZMmBUqUKBF4/PHHg9uwr3Pn3XffDdx5552BN954w62HM3ny5LD7s7Nfzz333ECjRo0Cc+fODcyePTtwzDHHBK644opAXkjbAEWLEl5//fXB63/++WegcuXKgaFDhya0Xalk8+bN7h/Bxx9/7K5v3749UKRIEfeF41m5cqXbZs6cOQlsafL69ddfA8cee2xg2rRpgdNPPz0YoLCv4+v2228PtGrVKub9+/fvD1SqVCnw0EMPBW/Te1CsWLHAf//733xqZWo4//zzA926dQu7rUOHDoFOnTq539nX8REZoGRnv65YscL93YIFC4LbvPfee4ECBQq4RYDjLS27ePbs2WOLFi1y6StPwYIF3fU5c+YktG2pZMeOHe5nmTJl3E/t871794bt9zp16li1atXY77mkLpzzzz8/bJ8K+zq+3nrrLTfr9aWXXuq6L0888UR79tlng/evXbvWTTgZur+13oi6jtnfOXPKKae4ZU+++eYbd33JkiX2ySefWLt27dx19nXeyM5+1U916+jfgkfb6/g5b968uLcpKVYzjreff/7Z9XNGzlqr61999VXC2pVKtLaS6iFatmxp9evXd7fpw691mCIXetR+133ImYkTJ9rnn39uCxYsyHQf+zq+vv32Wxs1apRbouOOO+5w+/zGG290+1hLdnj7NNp3Cvs7Z/7zn/+4lXQVUGtRWX1X33fffa7uQdjXeSM7+1U/FaCHKly4sDsJzYt9n5YBCvLnzH7ZsmXuzAfxpyXQb7rpJps2bZor8kbeB9w6a7z//vvddWVQ9PkePXq0C1AQP6+++qqNHz/eJkyYYMcff7wtXrzYneyosJN9nV7SsounXLlyLjKPHNGg65UqVUpYu1JF79697Z133rGZM2dalSpVgrdr36p7bfv27WHbs99zTl04mzdvtpNOOsmdwejy8ccf2xNPPOF+11kP+zp+NKqhXr16YbfVrVvXfvjhB/e7t0/5Tjl4t912m8uiXH755W6kVOfOna1Pnz5ulKCwr/NGdvarfup7J9S+ffvcyJ682PdpGaAoLdu4cWPXzxl6hqTrLVq0SGjbkpnqrhScTJ482WbMmOGGCYbSPi9SpEjYftcwZH3Js99zpk2bNrZ06VJ3dulddIavNLj3O/s6ftRVGTlkXjUS1atXd7/rs64v6ND9rW4K9cuzv3Pmt99+czUNoXRCqe9oYV/njezsV/3USY9OkDz6rtd7o1qVuAuk8TBjVSePHTvWVSb37NnTDTPeuHFjopuWtHr16uWGqH300UeBDRs2BC+//fZb2NBXDT2eMWOGG/raokULd8HBCx3FI+zr+A7lLly4sBsCu2rVqsD48eMDhx56aGDcuHFhQzT1HfLmm28Gvvzyy8BFF13E0Ndc6NKlS+Coo44KDjPWkNhy5coF+vXrF9yGfZ37UX9ffPGFu+jw/8gjj7jfv//++2zvVw0zPvHEE91w+08++cSNImSYcR4YMWKE+wLXfCgadqxx3cg9feCjXTQ3ikcf9Ouuuy5wxBFHuC/4Sy65xAUxiH+Awr6Or7fffjtQv359d2JTp06dwDPPPBN2v4Zp3n333YGKFSu6bdq0aRP4+uuvE9beZJWRkeE+x/puLl68eKBmzZpu7o7du3cHt2Ff587MmT
OjfkcrKMzuft26dasLSDQ3TcmSJQNdu3Z1gU9eKKD/xT8vAwAAkHtpWYMCAAD8jQAFAAD4DgEKAADwHQIUAADgOwQoAADAdwhQAACA7xCgAAAA3yFAAQAAvkOAAgAAfIcABQAA+A4BCgAA8B0CFAAAYH7z/9hDmrbN05E4AAAAAElFTkSuQmCC", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from neps import algorithms\n", - "import matplotlib.pyplot as plt\n", - "values=[]\n", - "for _ in range(10000):\n", - " values.append(algorithms.hyperband(SimpleSpace(),sampler=\"uniform\")({},None).config[\"int_param3\"])\n", - " values.append(algorithms.neps_hyperband(SimpleSpace(),sampler=\"uniform\")({},None).config['SAMPLING__Resolvable.int_param3::integer__1_100_False'])\n", - " values.append(algorithms.hyperband(SimpleSpace(),sampler=\"prior\")({},None).config[\"int_param3\"])\n", - " values.append(algorithms.neps_hyperband(SimpleSpace(),sampler=\"prior\")({},None).config['SAMPLING__Resolvable.int_param3::integer__1_100_False'])\n", - "\n", - "plt.hist([v for n,v in enumerate(values) if n % 4 == 0], alpha=0.5, label='HB with uniform sampler',bins=100)\n", - "plt.hist([v for n,v in enumerate(values) if n % 4 == 1], alpha=0.5, label='NePS HB with uniform sampler',bins=100)\n", - "plt.hist([v for n,v in enumerate(values) if n % 4 == 2], alpha=0.5, label='HB with prior sampler',bins=100)\n", - "plt.hist([v for n,v in enumerate(values) if n % 4 == 3], alpha=0.5, label='NePS HB with prior sampler',bins=100)\n", - "plt.legend()\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "4d423fb2", - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAAQalJREFUeJzt3QucjHX///EPLUtCDiEhKkUl5RCLuxOlklTuSrdKEhUqKspdSHJIhcghkkPppCK5SwmJrHOUyKHcSKETG3LK/B/v7+9/zT0zO3uetdfuvp6Px9idmWtnvnPNmO/7+h6ub4FAIBAwAAAAHymY0wUAAACIREABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+Q0ABAAC+E2e50LFjx+ynn36y4sWLW4ECBXK6OAAAIB10btg///zTKlasaAULFsx7AUXhpHLlyjldDAAAkAnbt2+3SpUq5b2AopYT7wWWKFEip4sDAADSISkpyTUwePV4ngsoXreOwgkBBQCA3CU9wzMyPEj2iy++sJYtW7r+Iz3BjBkzUtz2vvvuc9sMHz487Pbff//d2rZt68LFySefbB06dLB9+/ZltCgAACCPynBA2b9/v9WuXdtGjRqV6nbTp0+3JUuWuCATSeHk22+/tTlz5tisWbNc6OnUqVNGiwIAAPKoDHfxXHPNNe6Smh07dtgDDzxgn3zyibVo0SLsvvXr19vs2bNt+fLlVq9ePXfbyJEj7dprr7Xnn38+aqABAAD5S1x2TAG+4447rEePHnbeeecluz8xMdF163jhRJo1a+amGy1dutRuvPHGZH9z6NAhdwkdZAPAX1MHjx49an///XdOFwVADjrhhBMsLi4uJqcAiXlAefbZZ13hHnzwwaj379y508qVKxdeiLg4K126tLsvmkGDBlm/fv1iXVQAMXD48GH7+eef7cCBAzldFAA+cOKJJ9qpp55qhQsX9k9AWblypb344ou2atWqmJ5ArVevXvbwww8nm6YEIGepxXTLli3uqEnds/pC4uSJQP5tST18+LD98ssv7nuhevXqaZ6M7bgFlIULF9ru3butSpUqwdvU5PvII4+4mTz//e9/rUKFCm6bUGoa1swe3RdNfHy8uwDwF30ZKaTogEFHTQDyt6JFi1qhQoVs69at7vuhSJEi/ggoGnui8SShmjdv7m5v3769u56QkGB79uxxrS1169Z1t82bN899yTVo0CCWxQFwnGTlKAlA3lIwRt8HGQ4oOl/J5s2bg9fVjLN69Wo3hkQtJ2XKlAnbXklKLSPnnHOOu16zZk27+uqrrWPHjjZ27Fg7cuSIde3a1dq0acMMHgAA4GQ45qxYscIuuugidxGNDdHvffr0SfdjTJ061WrUqGFNmzZ104ubNGli48aNy2hRAADHwWWXXWbdunVLdZtJkya5GZp+KJ8GbLdu3dqdDFRjotRqj+iqVq2a7GSqfhGXmQ+CBsKkl8adRFJryxtvvJHRpwaQiwybs/G4PVf3K8/O8N/cddddNnnyZDdL8PHHHw/errNj63QHGfmeizVV9qpwo1WsqnB1IswbbrgheN3jDVb+5z//6V5XrMbuvf/++641PLRSU/nSCi3HS2T59L5qTOTixYutbNmyVrJkyRwtHzKHjmMA+ZYG8OnUCH/88YflZhMnTnRTvdXlPnr0aHvttdfsmWeeidnj66AyPYu75ZTI8n3//fduOMH555/vhhhkZmaZJnhobCRSp4Gw2YWAAiDf0qB+VWBqbUjNokWL7B//+IeboaAZSzrPk5b9CG1R6N+/v912221WrFgxO+2008KWA1FrzFNPPeXG6alVQ60cKZ0rKjPUtaLXobJdd9111qpVK3e6h5SohUVj/zxqCVEl/t133wUrHb2Ozz77LFkXin7XDI3u3bu7v4ms/HUGcYWDk046yY03VHDKSLeQWrBCH1P77cILL3ShS/tZrSEas/jnn38Gt4ks3wsvvOCWUNHj6Lo
ohN55551WqlQpN+NMZ0TftGlTsrLMnDnTzj33XPc+bdu2zT2nwp7+Vq/p9NNPd9toKq32s2674IIL3PCHlKT1/uu16eSlCll6H//1r3+FzXb9/PPP3WvRvtWQCn0Or7jiCrfNxx9/7Pa3urP0d6HnI9Jr1/usi/abWpN69+6dauugWu3uueceO+WUU9xj6nnWrFmT7P145ZVXrFq1almapZMWAgqAfEtdIgMHDnTLbfz4449Rt9HRuCpajWn4+uuv7e2333aBJbSCl+eee86tU/bVV1+5LqOHHnrIrTcm7733ng0bNsxefvllVymqEq5Vq1a2vKaNGze6mZGpzYq89NJLXaXnWbBggau8vNu0FIkmMDRq1Chqd0qlSpXs6aefduEjNICoctSSJapwFRBUwT/66KNZfk16D7TPtHabLirv4MGDo26r8mkShmaMqmy67nXpKUQoXOiM5qqkNQZSrzO0/GpRU+Wr9eK8k4rqvWvcuLF7b7V8i2amKrDcfvvtLgieeeaZ7npKFX9a77/KoICrIKD7NDTirrvuSvY4CgcvvfSS67ravn273XLLLW78iIZM/Oc//7FPP/3UfZZDqbtLJ0NdtmyZO0/Z0KFD3etLyc033xwMPpptW6dOHTdeVKcC8WiijF6T9q0myWSXmJ9JNk+an/rRlfwyK8qbVPUfaf7dKQ+Ef8kBOL403kRHhH379rUJEyYku1+tK1rg1DtC18mnRowY4Sr5MWPGBI8gVYF5Y1nOPvts+/LLL12ldOWVV7qKWkfGarHRWAkdSV988cWplmvv3r3u6Dw91HKjsKVzSmlZELWi6ASXKdGRtQKUWgFUea1bt84dWSugaBV6/axfv37Uc9uoO0XP5R3th1JFq9mZqrBFIU5BJqvU1aIWDq8bRwFh7ty5NmDAgKjlU7l10kCvfAoFCiZ6T7zQpckaanFSIFCl7JVfXWQKmqEUZO699173uyaE6H3X/vH+7rHHHnOBaNeuXVHP55XW+3/33XcHfz/jjDPc56t+/fpu1mzoZ0AtOfqcSYcOHdx7rPCmv/FaxubPn+/K49Fr1OdQLTCaTfvNN9+46wpxkRS8FWQUULzxSwqc2kfvvvtucFFftbBNmTLFtbJkJ1pQAOR7OmrWkaYWM42ko1pVjqoovIvO7+SdRdejCiqUrnuPp4rsr7/+chWJKgYNclWYSI0qYx2dRl6iUYWj+1RWtTCoFUWVeEo0NkMVuVoiNJhU3QYKNbou+ul1jWSEgoEXTkSnO488MWdmqJsldIxJRh9X74OCWGirkk6JoQo79D1XqFF3TaTQ28qXL+9+hraAeLelVKa03n+1VLRs2dIFF71OhV8v2KRWDu1vL5x4t0WWoWHDhmFdZvpcKrBFWzdLnx+FIu2b0M+7PucKQh51c2V3OBFaUADke5dccokLHToijWxa1xe2jp6jjRkJPWt2anQUu2HDBjemQ90+nTt3dl1CCgKhs08iT3Z11llnpevxdXTubatKV+Mz1KqiI+5oj6EKS69ZLSU6UlYYUeWn1pe1a9e6LoTMdM1EvhY9T2rjHfQaI+8P7XJJ7XGzYwCrxnZEG1Ab+vze/dFuS6lMqb3/ao3QZ08Xteqo4lcwad68ebIBqJHPGev9os+6wl9o958ndKyQxicdDwQUADBzYxrU1eOdVNKjPnh1gaQVFpYsWZLsugYvhlZ+OkrWpUuXLu5cUGpu1+PHmrpgREftKdFR+vjx411AUVeJwoJCiypOBRWvKyEatTTEYuVqVcYKUxpw7FV62TGmQe+DWiyWLl0a7OL57bffXGjQgNjjIaX3XwFNZdHnz1tjLrUBtxml1xz5uVQ3pfcZCaXPohbtVWuTWq1yGl08APD/m+w11kT9/6HUn68WBY2nUOWp5vEPPvgg2SBZjW8YMmSI617RDJ5p06a5cR6iLiKNb1HrxA8//GCvv/66q7DUVB4LmnmhiuWnn35yR+Ua96FxMKEBKZJaTRS8NBhUJ8v
0btNRvGaUpHaUrMpLg2B37Nhhv/76a6bLrS4XdVP8+9//dl0IGuypfRVrqpA140bdKxpnoa4MDXDVbCvdnt1Se//VCqfAp8Gtuk9jZTRgNlbUGqMTqiqMvfnmm+55vM9lJI2RUReQzrGjAbcarKvP/hNPPBHT0JRetKCk4wRTDbf9lmybmQX/d7p/qb0n+X/SpPX/15+bmoXvZf38Cwlnhi8vEE3nCztn+XmAvE4Vu2bphFLXhyp9fUlrqrGOeDXO4tZbbw3bToui6ku8X79+bnqmZkuomd5rHtcRsioKtTwoDH344YfJlgbJLG+tMzXxq7tHLSGanaQj4ZSoDCqXgow3EFMBReVLa/yJ9pO6vbQf1NqS2ZPaaRyMKusePXq41hzNFtFMFW8wZqzPFaOKWWNt1HWiffTRRx+l2MUWS2m9/wowCmkKx2rF0MDU66+/PibPrdlFaknToFy1mmgfpLR/9fnRPtFnXZ8pDaL2Pk/eOJvjqUAgJ0+XmElJSUluTrdGueuLIPsDyri0A8qiKAEl/tQ0n2th0+QDsjKKgIKccvDgQTeALrvPh+B3fjuzKiAKmuq2PN6nsk/teyEj9TddPAAAwHcIKAAAwHcYgwIAWRRtUVQgp30eZbpwbkILCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgD4gE4zPmPGjFS30UrLWifFD+X77rvvrGHDhu5MoTpbKVKegq59lx2LIOZ1nAcFQPaYP+j4PdflvTL8J6rstcheZCjQuSMuv/xy++OPP9waKt51jyrkM844I9U1TTLj559/tlKlSgUrNZ0m/KuvvvJN5R9aPunbt69bUFCL0Hlr+QCxREABgHRQRay1Q7TwmhZ6u//++91ieVrgLha0KJufRZZPqw+3aNEiSysya9E+reSL1B3Op/uJLh4ASIdy5cq5SlotGw8++KD7uWrVqqjbag3WU045xd59993gbWoJOfXU/y0gumjRIouPj7cDBw4k60LRY8tFF13kbo9cXVir3eqxtBpuly5d7MiRIxnqFtKihqGPqd/1mnr27OlWGNbr1KrCoULLp99XrlzpVjXW796233zzjV1xxRVWtGhRVza1MO3bty9ZWQYMGGAVK1a0c845J9gF8s4777jVovW39evXt40bN9ry5cutXr16roXmmmuucavrpkQtXm3btnX7XY9RvXp1t4Kx57HHHnMrN5944omuBax3795h+02vQe/Rq6++alWqVHHP2blzZ7f68JAhQ9w+0WdAZY/cL2PGjHHl0/PqsUPf92jWrl3rttdzaJXgO+64w3799dew96Nr167ufSpbtmxwVez8hoACABmg8DF79mzbtm2bNWjQIOo2qrS0RL13qnFVnuvXr3etLxq7IQsWLHAVsSrMSMuWLXM/P/vsM9e18v777wfvmz9/vmu90M/JkyfbpEmT3CWr9Fjqslm6dKmrkBU+5syZE3Vblem8886zRx55xP3+6KOP2v79+11Fqm4gBYtp06a58quiDTV37lzXGqXHnjVrVliX0ZNPPulCX1xcnP3rX/9ygenFF1+0hQsX2ubNm61Pnz4pll+BY926dfbxxx+7fa3QoMrdU7x4cbeftI0ec/z48TZs2LCwx9B+1d/r/X3zzTdtwoQJrpXoxx9/dO/Xs88+68qofRT53K1bt7Y1a9a4kNSmTRtXhmjUragQp/C5YsUK91y7du2yW265Jdn7UbhwYfvyyy9t7Nixlh/RxQMg31IFGTl+QkfM0VSqVMn9PHTokB07dsxV4AohKdFR8Msvv+x+/+KLL1yFpKNwhZYaNWq4n5deemnUv1UrgKgVIrJrRQHgpZdeshNOOME9jipQVfodO3a0rLjgggtcSBC1Pug59LhXXnllsm1VJoUI7TuvfKrwDx48aFOmTHFBR/QYLVu2dBW7WgpE973yyivBLgtvHSOFHK+lQON7brvtNvf8jRs3drd16NAh1SCmwKh9rBY
XqVq1atj9ChYe3afne+utt1wI8uh9VQuKwsy5557rxh4pTH300UdWsGBB1+Kj16JwGBpOb775Zrvnnnvc7/3793fha+TIkTZ69Ohk5dQ+UTkHDhwYvE3PWblyZddqpFYe7z0YMmSI5WcEFAD5liogHWmH0tHx7bffnmxbHcWr4lJAUQuHWgbUHaKxKNEofKiiVbeEjr4VWLyAosp28eLFYZVjeqnlQuHEo64eda1klQJKKD3u7t270/33ajGoXbt2MJyIwoUqfVXyXkCpVatW1PEUoc8fum3obamVR++DWjHUAnPVVVe5rqRGjRoF73/77bdtxIgRrpVE3U5Hjx51Y4pCKbjoPQ59Tu1rhZPUypGQkJDsekqzdtTKooATbWCxyuYFlLp161p+R0ABkG+pMj3rrLPCblNzfjQaF6JZPV5IUJDReISUAooqVwUYhRNdtK0Cio7A1QWi8Q+hFWh6FSpUKFl3kkJASlS5qlsqVLQxKxl93MwKDTApPb+eO9ptqZVHYzq2bt3qWjvUgqHByxqfo/E6iYmJruulX79+rpWmZMmSrvXkhRdeSLEM3nPGer8oHHmtSpFCxygVS2E/5SeMQQGATNCRtcaUpEQVmQZ9fvDBB/btt99akyZNXCuBWmDU9aOuiJQqIa+FIaXupoxQd5HGiYTKjnNy1KxZ07UOaCyKR+MnvK6R40GvtV27dvb666/b8OHDbdy4ce52tVZpttETTzzh9ru6TxRmYmXJkiXJrmt/RFOnTh33eVBrjcJx6IVQEo6AAgDpoGb9nTt3uopNA0Bfe+01a9WqVap/o24dDbbU7BA16auy1riVqVOnpjj+RDRbRDNCvAGUe/fuzXS5NSBTgzE1NmTTpk1unIlmkcSaWih0jhgFBD2+ujEeeOABN0PF67LJThpAqzCowbQKABpf5IUEBRKNUVGribpR1NUzffr0mD23Pg8aR6IxJNq/XhdgNGrV+f33390YG7WkqTyffPKJtW/fPiaBNC8hoABAOqgVQE3wOtLVlNV7773XDYRMjUKIKp3IKb2Rt0XSAFRVompp0XTctIJQatSloVkmGu+iWUN//vmn3XnnnRZrmo2kilaVr57nn//8p+tm0aDQ40GtTr169XKtVAqBauFSIJHrr7/eunfv7kKDwqJaVLRPYkVdR3ouPbeCoEKpBtlGo/dTLUv6DGisjLoCNZ1Y3YehY11gViAQ2TmZCyQlJbk+RB1VRA5yioVhczaGXW+47f+aCUPNLLg57HrtRb8mL2f8//oTU7KwafjAtMxIOLNMmtt0vrBzlp8HiKRZG1u2bHHjM3T0DOQ36spTa0xOLUGQ274XMlJ/E9cAAIDvEFAAAIDvMM0YAIBMyoWjJHINWlAAAIDvEFAAAIDvEFAAAIDvEFAAAIDvEFAAAIDvEFAAAIDvEFAAAKnSafl1OvbUTJo0Kbja8/Fw11135buztxYoUMBmzJhh+UWGz4PyxRdf2HPPPWcrV650K2SGnuJXS3g/+eSTbrnrH374wZ3OtlmzZjZ48GC3/oBHazVoEakPP/zQrT3QunVre/HFF91iWgDyhtGrRx+358rMUg6q4CZPnmyDBg2yxx9/PHi7KoAbb7wxR89vocpegWDPnj1pnlpd1z1af0bftVoHR68rPj4+JuV5//33rVChQsHrWolX5UsrtGQn1RmcgyRvy3ALipbSrl27to0aNSrZfQcOHLBVq1a5RZj0Ux/qDRs2uIWaIle91GqTc+bMcStOKvR06tQpa68EADJI64Q8++yz9scff1huNnHiRHfAqPVPRo8e7VZafuaZZ2L2+KVLl7bixYubH2iRvWPHjrkD4Ky02CjcHD161PKTw4cPW54OKNdcc4374OsII5I+MAodt9xyi1v5s2HDhm4lS7W2aKlrWb9+vVtC/JVXXrEGDRpYkyZN3IqgWgnyp59+is2rAoB0UAtvhQoVXGtDahYtWmT/+Mc/rGjRola5cmV78MEH3cFaaIt
C//797bbbbrNixYrZaaedFnYQp8rwqaeesipVqrhWDbVy6DFiRRW1XofKdt1117nVj3WQmBK1sGhlX49aQtQS89133wUrMr2Ozz77LFkXj37funWrWx1YfxPagiNa0bhmzZquRfzqq692wSkln3/+ufv7//znP24lYAVG1Rtr165N1nU0c+ZMt0Kw9p/qk8gunkOHDrl9Wq5cOfc4qluWL1+e7Lk+/vhjq1u3rnscva+R9Nq1b7RytR7n9NNPD/t8DB061K1ArP2j/d25c2fbt29fsvLq4Fv1oFZ51v7WAbxa7PRZKVWqlCurwlZ6P0PRbN++3dW3ej6FSL3v//3vf4P3e/towIAB7jOn8uQm2T4GRSsW6kPhJd3ExET3e7169cK+JNTVs3Tp0qiPoQ+eVkAMvQBAVqlLZODAge4g6ccff4y6zffff+8qWnVFf/311/b222+7ii20ghd1fat1+auvvnJdRg899JA7YJP33nvPhg0bZi+//LJt2rTJdSOpkssOGzdutHnz5rkDwJRceumlrsL2LFiwwMqWLRu8TRW7uuwbNWqU7G/VMl6pUiV7+umnXfgIDSCqhJ9//nnXgqOWcQWJRx99NM0y9+jRw1544QX3vKeccoq1bNnSPX/o46qlSwe2an1XCInUs2dPt58VAhTOzjrrLGvevLkbUhBK742GHehgWaEo0ogRI1wYeuedd1wPwNSpU1148Kiu0jYqh55L+1rPHUrl1TY68NYBufarDuo1/EEX7R99Ft599910f4Yiaf/o9alla+HChfbll18GQ2FoS8ncuXPd6/B6LHKTuOxecvmxxx5zidBbVnnnzp3JPlxxcXEu/em+aJRe+/Xrl51FBZBPqeK48MILrW/fvjZhwoSo3z/qlvZaEKpXr+4qH1XyY8aMCS4n37hx4+BYlrPPPttVGAolV155pauo1cKhgzGN5VBLysUXX5zmwV16x+XpO1ZhS10WOqBTK0qvXr1S3F6tIKr8fvnlF/f9u27dOtc1r4r0vvvucz/r16/vjv4j6btaz6WKUa8pstIcO3asnXnmme66QpyCTFq077WfRJW+ApDG2ah1wHtcdV2p8o5GrVl6L9R6oVZ+GT9+vKuU9Z4qAHlUHu+5otF7pfdYLTA6uFYLSqjQcTcKLupR0D5T+UL3g8rj7Qe1oCiU7Nq1y72nagm6/PLLbf78+XbrrbcG/y61z1AkBWV1dSm0ea1Y6upTA4Dev6uuusrdptYYbVO4cGHLbbKtBUVvkD5catrUG5UV+o+m/6zeRc1aABArOjpXxaij6khr1qxxFZ8qFu+iI1dVDhrz4UlISAj7O133Hu/mm2+2v/76y8444wzr2LGjq3zTGv+gALB69epkl2hUiek+lVVHyWpFueOOO1J87PPPP98FDbWc6Oj7oosucqFG10U/FWIySoHGq5RF3SS7d+9O8+9C953Kpa6I0PdClWu01o7QVi7VOargPQqCCoGR72lo63006hbRvlQZ1A3z6aefht2vbq+mTZu6Lhi9R9rPv/32m2s1SWk/lC9f3oWZ0MCp2yL3TWqfoUh6rzdv3uzK4H0ute/UMKD94VFLXW4MJ9nWguKFE/VTqvnLaz0RJe7IN0X/UdUMF5nGPeorjNVodACIdMkll7jQoYMhVVChNL7g3nvvjTpmRC0h6aGxCmpmV+Wmo3qNW1BzvoJA6OyYUOpKUDdFeui709tWFeuff/7pWlV0dB/tMXTErdesI219tyqMKACo9UXjPxYvXpyurplIka9FzxOLmTYa+xM51iWz1KKQmjp16rjgqbEqer9Ul6nlS90xGt+hIHf//fe7cR0KBOru69Chg+tW8Vqcou2HaLcp5GbWvn373FgadUFFUjdZel9vvgooXjhRP6uar8qUKZMsEWrqnAbOaueKQozeqNT6TAEgO2lcgrp6IgcSqsJSF0haYWHJkiXJrmuwaGglq7EVunTp0sVq1Khh33zzjXv8WFMXjKjVJiX
qolI3iAKKKlsFIoUWBScFldDWiEg6Ig8d4JlV2lde2NOMKrUAhe67tKi1QmVSl4jXJaO6SGNaMjMVWgfV6nrRRd0zGtehg2jVW6qrNF5G+0s0ViVW0voMhdLnRt08GjIR2giQl8RlJrWpWcmjpKnmMCVJNefpzdQAJTUz6gPsjSvR/foAaWfrzVYzp/oq9SFSP2WbNm3CzpUCAMeTmsI11kTjS0JpHJ1mluh76p577nFHpAosagnRLEWPKschQ4a4WRO6b9q0aW52iqiLSN+HOgjTUfbrr7/uAkvk+IbM0kGfvmtVeergUOMsNIYhtUperSaaiaPvZY238G5Ty4nGn6R25K3uCg2C1fe2Ao4G2GaFyquDWXV7PPHEE+7xMnISNpVVrRoaa6K6RmFH74W6XdS6kRGapaO6TN1eCiF6H9VCpbEdCqmqszSoWkFT77nqsVhJ7TMUSZ9VhUnN3NH+07gd9VpoELMG7ep6bpfhMSgrVqxwb5wu8vDDD7vf+/TpYzt27HCjnzUaXkciepO9i5oMPWqS0tGD+vGuvfZa959j3LhxsX1lAJBB+qKPbHZX14e6YnRUr6nG3vdd5AHVI488Evx+VNeKKjp1G4kqN7VWqFVCj6euA52oMrKFObPat2/vvmdVKalr57zzznNdFBoAm1ogU7n0Xe2NjVBAUZBKa/yJ9pO6O9RyEdqdkJXWKw3aVau6gpb2TUbHTegxNNNKY0LUuqADaU151pTejNCYDoUEjVVRUNPr1MwbhRUN0tX7qjFLGsejuiytKeoZkdpnKJKCrkKiwthNN93kwqjCmMag5JUWlQKBXHgqPk0z1jlXNGA2O96IYXM2hl1vuC15eJpZ8H+tSFJ70a/Jyxl/aprPtbBpygO/0ivhzDLZcqZNIC36MlQrarVq1YKzWfIjP5xZNTfSGBjNZlG3zvE8Tb4f5aXP0MFUvhcyUn+zFg8AAPAdAgoAAMhfJ2oDgPwg9PTiSD+NdcmFowyyBZ+h5GhBAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAf0Oq2M2bMSHUbrbSckTVqYnF20+HDh1t+OrOt3getbYScx3lQAGSLX0b+byG97HbKA10z/Deq7FURRYaCyNOve9c9OnX3GWec4daO6dSpk8XKzz//HFw3RufE0GnCv/rqK7dWTk7RasCpLRoIZCcCCgCkw4YNG9zaIX/99ZdbzE6r52qxPC16GgtaMdcvDh8+7Bbry+pCgN7j5Cf58TVnF7p4ACAdypUr50KEWjYefPBB93PVqlVRt9XZUVW5v/vuu8HbvBXePYsWLbL4+Hg7cOBAsi4ePbZoVVvdHrm68PPPP+8eS6shd+nSxY4cOZJiuZ966in33C+//LJVrlzZrYJ7yy23uMXaIruOBgwY4FZpPuecc6J28Wzbts1atWrlVj9WWNPj7Nq1K9lzvfLKK6kuILl161Zr2bKlazFSC41WX9aKwaLVlLUqr/6+aNGiriwvvvhi2N975R04cKCVL1/etXRpheWjR49ajx49rHTp0m5l54kTJwb/Rq1S2pdvvfWWNWrUyJVNKxJrperU6H3SKtYqi/af3vv9+/cH79c+6t+/v915551un8SyVS2/I6AAQAYofMyePdtV1g0aNIi6jSrCSy65xHUPibqL1q9f71pfvvvuO3ebKsb69eu7wBBp2bJl7udnn33mun7ef//94H3z58+377//3v2cPHmyTZo0yV1Ss3nzZnvnnXdcy4/Krq6jzp3DVzifO3euayWaM2eOzZo1K9ljHDt2zIWT33//3ZVd2/3www926623Jnuu9957z5V59erVUcujUHXo0CH74osv7JtvvrFnn33WhR7veRQupk2bZuvWrbM+ffrYv//9b1f+UPPmzbOffvrJPcbQoUOtb9++dt1117nQs3TpUrvvvvvs3nvvtR9//DHs7xRgHnnkEbcPEhISXFD67bffopZT+/nqq6+21q1
b29dff21vv/22Cyxdu3ZNFhhr167tHrN3796pvhdIP7p4AORbqoi9itGjI/hoVGmKKlZVojpiVwhJiVo91GohqkTVGqIWGIWWGjVquJ+XXnpp1L/1ulbUQhLZ9aMK+KWXXrITTjjBPU6LFi1cuOjYsWOKZTl48KBNmTLFTjvtNHd95MiR7u9eeOGF4OOrJUMtHyl1T+g5FCa2bNniWhJEj6nWD41VUdjyujh0e2rdQwp3qvRr1arlrmtMj6dQoULWr1+/4HW1pCQmJrqAohYbj1pJRowYYQULFnStLEOGDHGtUQoz0qtXLxs8eLALFG3atAn+ncKFnlvGjBnjAtuECROsZ8+eyco5aNAga9u2rXXr1s1dr169untOvW/6W6+F6IorrnChB7FFCwqAfEuDX3WUH3pRJR3NwoULw7ZR94IqqZSoElMLwC+//OJaHBRYdFEwUZfM4sWLk3XdpIcCgcKJR109u3fvTvVvqlSpEgwnopYDhSy1mHgUFlIbO6EWIAUTL5zIueee67pXdJ/n9NNPT3PsirpJnnnmGWvcuLFr+VDrRKhRo0ZZ3bp13eMoQI4bN86Fmsj9oHDiUVePF3hE+0gBL3Lf6LV74uLirF69emHlD7VmzRrXOqUyeJfmzZu7faeg5tFjIPYIKADyLbUanHXWWWGX0Io8lI7kdb8qxvbt29sdd9zhxmykRJWljvIVTkIDin5Xi4NCisZCZJRaGCK7k1RhZlWsZuuk53Huuece1z2kfahWGVXwatURjRF59NFH3TiUTz/91AVC7W+1zKS1H2K9b/bt2+e6iUIDrELLpk2b3ADpjLxmZBwBBQAyQUfoGlOSElWOGlz5wQcf2LfffmtNmjSxCy64wHURqetHlXJKFZvXkpFSd1NGqfVB4zU8S5YsCXaNpFfNmjVt+/bt7uJRC5GmaqslJaPUEqNxIhqrou6R8ePHu9u//PJLF9w0RkbdYgqFGgsSK3rtHg2qXblypXtt0dSpU8e9xsgQqwszdbIfAQUA0kFdBTt37nQzUDSA87XXXnODRlOjFpM333zTzWxR94BCgcatTJ06NcXxJ96MIc0a0fgIzZIJnXGTGRor0a5dO3f0r64qdbFoPEdGpjY3a9bMtQppTIZmL2kgr2au6HVktItDYzo++eQT102ix9KAXy8kaJzHihUr3P0bN250g07V4hQr6j6aPn26G6yswboawHz33XdH3faxxx5zXXEat6LWE7WcKHBGDpJF9iCgAEA6qLVB4z109KyKS03/XrdESlR5qxUkdKyJfo+8LZLGRmgwplpaNO03rSCUFpX5pptusmuvvdauuuoq15IzevToDD2GWoRUOWuQrkKWAosGt2pmS0bp9SscKJRolszZZ58dLI/2q8qq2UGaJaUZNpEzjrJCA2d10awbDaCdOXOmlS1bNuq22k/qklNQUmuYWnQ0q0jvCbJfgYDmzOUySUlJVrJkSXdUoXnnsTZszsaw6w23jUu2zcyCm8Ou1170a/Jyxv/vnAcpWdj0AsuqhDPLpLlN5wtj9x8cCJ0doqPg1M55gZylc5Po/CopTfnNL/xydt78/r2QlIH6mxYUAADgOwQUAADgOwQUAMjjXTz5vXvHOyW9RjTQvZN7EFAAAIDvEFAAAIDvEFAAZFkunAwIwOffBwQUAJnmnVpci7QBQOj3QeTSAxnFasYAsnS6dy0W5y3IduKJJ7oTegHIny0nBw4ccN8H+l4IXdQyMwgoALLEO116WivqAsgfTj755Awto5ASAgqALFGLiU4Br/VjtEIvgPyrUKFCWW458RBQAMSEvpRi9cUEAAySBQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAvkNAAQAAuT+gfPHFF9ayZUurWLGiO4PkjBkzkp2Lv0+fPu7MkkWLFrVmzZrZpk2bwrb5/fffrW3btlaiRAl3StwOHTrYvn37sv5qAABA/gwo+/fvt9q1a9uoUaO
i3j9kyBAbMWKEjR071pYuXWrFihWz5s2b28GDB4PbKJx8++23NmfOHJs1a5YLPZ06dcraKwEAAHlGhk91f80117hLNGo9GT58uD355JPWqlUrd9uUKVOsfPnyrqWlTZs2tn79eps9e7YtX77c6tWr57YZOXKkXXvttfb888+7lhkAAJC/xXQMypYtW2znzp2uW8dTsmRJa9CggSUmJrrr+qluHS+ciLYvWLCga3GJ5tChQ5aUlBR2AQAAeVdMA4rCiajFJJSue/fpp1Y9DRUXF2elS5cObhNp0KBBLuh4l8qVK8ey2AAAwGdyxSyeXr162d69e4OX7du353SRAABAbgkoFSpUcD937doVdruue/fp5+7du8PuP3r0qJvZ420TKT4+3s34Cb0AAIC8K6YBpVq1ai5kzJ07N3ibxotobElCQoK7rp979uyxlStXBreZN2+eHTt2zI1VAQAAyPAsHp2vZPPmzWEDY1evXu3GkFSpUsW6detmzzzzjFWvXt0Flt69e7uZOTfccIPbvmbNmnb11Vdbx44d3VTkI0eOWNeuXd0MH2bwAACATAWUFStW2OWXXx68/vDDD7uf7dq1s0mTJlnPnj3duVJ0XhO1lDRp0sRNKy5SpEjwb6ZOnepCSdOmTd3sndatW7tzpwAAAGQqoFx22WXufCcp0dlln376aXdJiVpb3njjDd4BAACQe2fxAACA/IWAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAAfIeAAgAA8n5A+fvvv613795WrVo1K1q0qJ155pnWv39/CwQCwW30e58+fezUU0912zRr1sw2bdoU66IAAIBcKuYB5dlnn7UxY8bYSy+9ZOvXr3fXhwwZYiNHjgxuo+sjRoywsWPH2tKlS61YsWLWvHlzO3jwYKyLAwAAcqG4WD/g4sWLrVWrVtaiRQt3vWrVqvbmm2/asmXLgq0nw4cPtyeffNJtJ1OmTLHy5cvbjBkzrE2bNrEuEgAAyO8tKI0aNbK5c+faxo0b3fU1a9bYokWL7JprrnHXt2zZYjt37nTdOp6SJUtagwYNLDExMepjHjp0yJKSksIuAAAg74p5C8rjjz/uAkSNGjXshBNOcGNSBgwYYG3btnX3K5yIWkxC6bp3X6RBgwZZv379Yl1UAACQX1pQ3nnnHZs6daq98cYbtmrVKps8ebI9//zz7mdm9erVy/bu3Ru8bN++PaZlBgAAebwFpUePHq4VxRtLUqtWLdu6datrBWnXrp1VqFDB3b5r1y43i8ej6xdeeGHUx4yPj3cXAACQP8S8BeXAgQNWsGD4w6qr59ixY+53TT9WSNE4FY+6hDSbJyEhIdbFAQAAuVDMW1BatmzpxpxUqVLFzjvvPPvqq69s6NChdvfdd7v7CxQoYN26dbNnnnnGqlev7gKLzptSsWJFu+GGG2JdHAAAkAvFPKDofCcKHJ07d7bdu3e74HHvvfe6E7N5evbsafv377dOnTrZnj17rEmTJjZ79mwrUqRIrIsDAAByoZgHlOLFi7vznOiSErWiPP300+4CAAAQibV4AACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAAC
A7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAACA7xBQAABA/ggoO3bssNtvv93KlCljRYsWtVq1atmKFSuC9wcCAevTp4+deuqp7v5mzZrZpk2bsqMoAAAgF4p5QPnjjz+scePGVqhQIfv4449t3bp19sILL1ipUqWC2wwZMsRGjBhhY8eOtaVLl1qxYsWsefPmdvDgwVgXBwAA5EJxsX7AZ5991ipXrmwTJ04M3latWrWw1pPhw4fbk08+aa1atXK3TZkyxcqXL28zZsywNm3axLpIAAAgv7egzJw50+rVq2c333yzlStXzi666CIbP3588P4tW7bYzp07XbeOp2TJktagQQNLTEyM+piHDh2ypKSksAsAAMi7Yh5QfvjhBxszZoxVr17dPvnkE7v//vvtwQcftMmTJ7v7FU5ELSahdN27L9KgQYNciPEuaqEBAAB5V8wDyrFjx6xOnTo2cOBA13rSqVMn69ixoxtvklm9evWyvXv3Bi/bt2+PaZkBAEAeDyiamXPuueeG3VazZk3btm2b+71ChQru565du8K20XXvvkjx8fFWokSJsAsAAMi7Yh5QNINnw4YNYbdt3LjRTj/99OCAWQWRuXPnBu/XmBLN5klISIh1cQAAQC4U81k83bt3t0aNGrkunltuucWWLVtm48aNcxcpUKCAdevWzZ555hk3TkWBpXfv3laxYkW74YYbYl0cAACQC8U8oNSvX9+mT5/uxo08/fTTLoBoWnHbtm2D2/Ts2dP279/vxqfs2bPHmjRpYrNnz7YiRYrEujgAACAXinlAkeuuu85dUqJWFIUXXQAAACKxFg8AAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAPAdAgoAAMh/AWXw4MFWoEAB69atW/C2gwcPWpcuXaxMmTJ20kknWevWrW3Xrl3ZXRQAAJBLZGtAWb58ub388st2wQUXhN3evXt3+/DDD23atGm2YMEC++mnn+ymm27KzqIAAIBcJNsCyr59+6xt27Y2fvx4K1WqVPD2vXv32oQJE2zo0KF2xRVXWN26dW3ixIm2ePFiW7JkSXYVBwAA5CLZFlDUhdOiRQtr1qxZ2O0rV660I0eOhN1eo0YNq1KliiUmJkZ9rEOHDllSUlLYBQAA5F1x2fGgb731lq1atcp18UTauXOnFS5c2E4++eSw28uXL+/ui2bQoEHWr1+/7CgqAADIDy0o27dvt4ceesimTp1qRYoUiclj9urVy3UNeRc9BwAAyLtiHlDUhbN7926rU6eOxcXFuYsGwo4YMcL9rpaSw4cP2549e8L+TrN4KlSoEPUx4+PjrUSJEmEXAACQd8W8i6dp06b2zTffhN3Wvn17N87kscces8qVK1uhQoVs7ty5bnqxbNiwwbZt22YJCQmxLg4AAMiFYh5Qihcvbueff37YbcWKFXPnPPFu79Chgz388MNWunRp1xrywAMPuHDSsGHDWBcHAADkQtkySDYtw4YNs4IFC7oWFM3Qad68uY0ePTonigIAAPJrQPn888/Drmvw7KhRo9wFAAAgEmvxAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yG
gAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAAAA3yGgAACAvB9QBg0aZPXr17fixYtbuXLl7IYbbrANGzaEbXPw4EHr0qWLlSlTxk466SRr3bq17dq1K9ZFAQAAuVTMA8qCBQtc+FiyZInNmTPHjhw5YldddZXt378/uE337t3tww8/tGnTprntf/rpJ7vppptiXRQAAJBLxcX6AWfPnh12fdKkSa4lZeXKlXbJJZfY3r17bcKECfbGG2/YFVdc4baZOHGi1axZ04Wahg0bxrpIAAAgl8n2MSgKJFK6dGn3U0FFrSrNmjULblOjRg2rUqWKJSYmZndxAABAfmxBCXXs2DHr1q2bNW7c2M4//3x3286dO61w4cJ28sknh21bvnx5d180hw4dchdPUlJSdhYbAADk5RYUjUVZu3atvfXWW1keeFuyZMngpXLlyjErIwAAyEcBpWvXrjZr1iybP3++VapUKXh7hQoV7PDhw7Znz56w7TWLR/dF06tXL9dV5F22b9+eXcUGAAB5MaAEAgEXTqZPn27z5s2zatWqhd1ft25dK1SokM2dOzd4m6Yhb9u2zRISEqI+Znx8vJUoUSLsAgAA8q647OjW0QydDz74wJ0LxRtXoq6ZokWLup8dOnSwhx9+2A2cVdh44IEHXDhhBg8AAMiWgDJmzBj387LLLgu7XVOJ77rrLvf7sGHDrGDBgu4EbRr82rx5cxs9ejTvCAAAyJ6Aoi6etBQpUsRGjRrlLgAAAJFYiwcAAPgOAQUAAPgOAQUAAPgOAQUAAOSvU93j+Ej8/rd0bNPf/CLhzDJpbtP5ws7HpSwAAH+iBQUAAPgOAQUAAPgOAQUAAPgOAQUAAPgOg2TzmEpJK9PcpvaiX5PdlhR/app/t7DpBZkuFwAAGUELCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8B0CCgAA8J24nC4AkCvMH5TmJr/MWp38xqr/SPPvTnmga2ZLBQB5Fi0oAADAdwgoAADAdwgoAADAdwgoAADAdxgkC6Rh2JyN1nDbb6luM7PgZqu959dktyetX5Dm4y987w+LpYQzy6S5TecLO8f0OYH8/h2RllVJb5ufJOSC74kcbUEZNWqUVa1a1YoUKWINGjSwZcuW5WRxAACAT+RYQHn77bft4Ycftr59+9qqVausdu3a1rx5c9u9e3dOFQkAAOT3gDJ06FDr2LGjtW/f3s4991wbO3asnXjiifbqq6/mVJEAAEB+HoNy+PBhW7lypfXq1St4W8GCBa1Zs2aWmJiYbPtDhw65i2fv3r3uZ1JSUraU7+D+fWHX9/91KHmZCh4Ju37g8NFk2/xVIHybaA4fOGixdOivtJ8zp8v6176/0twmu97bzH4eon0GIj8POb1fc+v+BXK7yDrjePw/z63fE95jBgKBtDcO5IAdO3aoZIHFixeH3d6jR4/AxRdfnGz7vn37uu25cOHChQsXLpbrL9u3b08zK+SKWTxqadF4Fc+xY8fs999/tzJlyliBAgWylOQqV65s27dvtxIlSsSotAjFPj4+2M/Zj32c/djHeX8/BwIB+/PPP61ixYppbpsjAaVs2bJ2wgkn2K5du8Ju1/UKFSok2z4+Pt5dQp188skxK4/eIP4zZC/
28fHBfs5+7OPsxz4+PnJqP5csWdK/g2QLFy5sdevWtblz54a1iuh6QkJCThQJAAD4SI518ajLpl27dlavXj27+OKLbfjw4bZ//343qwcAAORvORZQbr31Vvvll1+sT58+tnPnTrvwwgtt9uzZVr58+eNWBnUb6Twskd1HiB328fHBfs5+7OPsxz4+PuJzyX4uoJGyOV0IAACAUCwWCAAAfIeAAgAAfIeAAgAAfIeAAgAAfCdfB5RRo0ZZ1apVrUiRItagQQNbtmxZThcp1xo0aJDVr1/fihcvbuXKlbMbbrjBNmzYELbNwYMHrUuXLu4MwCeddJK1bt062cn6kH6DBw92Z1Lu1q1b8Db2cdbt2LHDbr/9drcPixYtarVq1bIVK1YE79e8As0+PPXUU939WkNs06ZNOVrm3Obvv/+23r17W7Vq1dw+PPPMM61///5h67OwnzPmiy++sJYtW7oztOp7YcaMGWH3p2d/6gztbdu2dSdv08lQO3ToYPv2pb3OULYJ5FNvvfVWoHDhwoFXX3018O233wY6duwYOPnkkwO7du3K6aLlSs2bNw9MnDgxsHbt2sDq1asD1157baBKlSqBffv2Bbe57777ApUrVw7MnTs3sGLFikDDhg0DjRo1ytFy51bLli0LVK1aNXDBBRcEHnrooeDt7OOs+f333wOnn3564K677gosXbo08MMPPwQ++eSTwObNm4PbDB48OFCyZMnAjBkzAmvWrAlcf/31gWrVqgX++uuvHC17bjJgwIBAmTJlArNmzQps2bIlMG3atMBJJ50UePHFF4PbsJ8z5qOPPgo88cQTgffff9+tdTN9+vSw+9OzP6+++upA7dq1A0uWLAksXLgwcNZZZwVuu+22QE7JtwFFixJ26dIleP3vv/8OVKxYMTBo0KAcLVdesXv3bvefZMGCBe76nj17AoUKFXJfRJ7169e7bRITE3OwpLnPn3/+GahevXpgzpw5gUsvvTQYUNjHWffYY48FmjRpkuL9x44dC1SoUCHw3HPPBW/Tfo+Pjw+8+eabx6mUuV+LFi0Cd999d9htN910U6Bt27bud/Zz1kQGlPTsz3Xr1rm/W758eXCbjz/+OFCgQAG3wG9OyJddPIcPH7aVK1e6Ji5PwYIF3fXExMQcLVtesXfvXvezdOnS7qf295EjR8L2eY0aNaxKlSrs8wxSF06LFi3C9qWwj7Nu5syZ7uzWN998s+uqvOiii2z8+PHB+7ds2eJOLBm6j7WuiLqI2cfp16hRI7e0ycaNG931NWvW2KJFi+yaa65x19nPsZWe/amf6tbR59+j7VU3Ll26NEfKnStWM461X3/91fWBRp61Vte/++67HCtXXqF1lTQuonHjxnb++ee72/SfQ2swRS7yqH2u+5A+b731lq1atcqWL1+e7D72cdb98MMPNmbMGLcUx7///W+3nx988EG3X7U0h7cfo313sI/T7/HHH3cr6ipAa+FYfR8PGDDAjX8Q9nNspWd/6qdCeai4uDh3kJlT+zxfBhRk/xH+2rVr3RERYkdLoz/00EM2Z84cN7Ab2ROudQQ5cOBAd10tKPosjx071gUUxMY777xjU6dOtTfeeMPOO+88W716tTuo0QBP9jM8+bKLp2zZsi61R85u0PUKFSrkWLnygq5du9qsWbNs/vz5VqlSpeDt2q/qWtuzZ0/Y9uzz9FMXzu7du61OnTruyEaXBQsW2IgRI9zvOhpiH2eNZjice+65YbfVrFnTtm3b5n739iPfHVnTo0cP14rSpk0bN0vqjjvusO7du7vZgMJ+jq307E/91PdLqKNHj7qZPTm1z/NlQFFzbd26dV0faOiRk64nJCTkaNlyK43LUjiZPn26zZs3z00fDKX9XahQobB9rmnI+uJnn6dP06ZN7ZtvvnFHm95FR/tqFvd+Zx9njbolI6fHa5zE6aef7n7X51pf1qH7WF0V6qNnH6ffgQMH3NiGUDpo1PewsJ9jKz37Uz91cKM
DIY++y/WeaKxKjgjk42nGGsE8adIkN3q5U6dObprxzp07c7poudL999/vprB9/vnngZ9//jl4OXDgQNgUWE09njdvnpsCm5CQ4C7IvNBZPMI+zvr07bi4ODcNdtOmTYGpU6cGTjzxxMDrr78eNl1T3xUffPBB4Ouvvw60atWK6a8Z1K5du8Bpp50WnGasqbFly5YN9OzZM7gN+znjs/u++uord1HVPnToUPf71q1b070/Nc34oosuclPsFy1a5GYLMs04h4wcOdJ9met8KJp2rLnfyBz9h4h20blRPPqP0Llz50CpUqXcl/6NN97oQgxiF1DYx1n34YcfBs4//3x3AFOjRo3AuHHjwu7XlM3evXsHypcv77Zp2rRpYMOGDTlW3twoKSnJfW71/VukSJHAGWec4c7hcejQoeA27OeMmT9/ftTvYIXB9O7P3377zQUSnZOmRIkSgfbt27vgk1MK6J+cabsBAACILl+OQQEAAP5GQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAL5DQAEAAOY3/w9bLw3+tnymjQAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "import neps\n", - "from neps import algorithms\n", - "from functools import partial\n", - "import matplotlib.pyplot as plt\n", - "global_values = []\n", - "eta=3\n", - "for algo in [partial(algorithms.neps_hyperband, eta=eta), \n", - " partial(algorithms.hyperband, eta=eta), \n", - " partial(algorithms.neps_hyperband, sampler=\"prior\", eta=eta), \n", - " partial(algorithms.hyperband, sampler=\"prior\", eta=eta)]:\n", - " neps.run(\n", - " evaluate_pipeline,\n", - " SimpleSpace(),\n", - " root_directory=\"neps_test_runs/algo_tests\",\n", - " overwrite_root_directory=True,\n", - " optimizer=algo,\n", - " fidelities_to_spend=600\n", - " )\n", - "\n", - "plt.hist([v for n,v in enumerate(global_values) if n % 4 == 0], alpha=0.5, label='Neps HB with uniform sampler',bins=10)\n", - "plt.hist([v+1 for n,v in enumerate(global_values) if n % 4 == 1], alpha=0.5, label='HB with uniform sampler',bins=10)\n", - "plt.hist([v+2 for n,v in enumerate(global_values) if n % 4 == 2], alpha=0.5, label='Neps HB with prior sampler',bins=10)\n", - "plt.hist([v+3 for n,v in enumerate(global_values) if n % 4 == 3], alpha=0.5, label='HB with prior sampler',bins=10)\n", - "plt.legend()\n", - "plt.show()\n" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "70b97bfb", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Count of 1 in algo 0: 82\n", - "Count of 3 in algo 0: 52\n", - "Count of 11 in algo 0: 16\n", - "Count of 33 in algo 0: 2\n", - "Count of 1 in algo 1: 80\n", - "Count of 3 in algo 1: 56\n", - "Count of 11 in algo 1: 14\n", - "Count of 33 in algo 1: 4\n", - "Count of 1 in algo 2: 82\n", - "Count of 3 in algo 2: 52\n", - "Count of 11 in algo 2: 16\n", - "Count of 33 in algo 2: 2\n", - "Count of 1 in algo 3: 80\n", - "Count of 3 in algo 3: 56\n", - "Count of 11 in algo 3: 14\n", - "Count of 33 in algo 3: 4\n" - ] - } - ], - 
"source": [ - "for i in range(4):\n", - " for j in [v for v in range(100) if v in global_values]:\n", - " print(f\"Count of {j:<3} in algo {i}: \", [v for n,v in enumerate(global_values) if n % 4 == i].count(j))\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "neural-pipeline-search (3.13.1)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.1" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/neps_examples/basic_usage/priors_test.ipynb b/neps_examples/basic_usage/priors_test.ipynb deleted file mode 100644 index 7a4dd4b9d..000000000 --- a/neps_examples/basic_usage/priors_test.ipynb +++ /dev/null @@ -1,320 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 4, - "id": "180fcb7f", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Original pipeline:\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param1 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.LOW)\n", - "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\n", - "==================================================\n", - "After removing 'int_param1' (in-place):\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\n", - "==================================================\n", - "After adding 'int_param1' (in-place):\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param2 = Integer(1, 100, prior=50, 
prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tint_param1 = Float(0.0, 1.0, prior=, prior_confidence=)\n", - "\n", - "==================================================\n", - "After removing 'int_param1' (in-place):\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import torch\n", - "import torch.nn as nn\n", - "import neps\n", - "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", - "\n", - "# Define the NEPS space for the neural network architecture\n", - "class SimpleSpace(PipelineSpace):\n", - " int_param1 = neps.Integer(1,100, prior=50, prior_confidence=\"low\")\n", - " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\")\n", - " int_param3 = neps.Integer(1,100, prior=50, prior_confidence=\"high\")\n", - "\n", - "class OtherSpace(PipelineSpace):\n", - " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\", log=False)\n", - "\n", - "# Test in-place operations\n", - "pipeline = SimpleSpace()\n", - "print(\"Original pipeline:\")\n", - "print(pipeline)\n", - "\n", - "print(\"\\n\" + \"=\"*50)\n", - "print(\"After removing 'int_param1' (in-place):\")\n", - "pipeline.remove(\"int_param1\") # This modifies pipeline in-place\n", - "print(pipeline)\n", - "\n", - "print(\"\\n\" + \"=\"*50)\n", - "print(\"After adding 'int_param1' (in-place):\")\n", - "pipeline.add(neps.Float(0.0, 1.0), \"int_param1\") # This also modifies in-place\n", - "print(pipeline)\n", - "\n", - "print(\"\\n\" + \"=\"*50)\n", - "print(\"After removing 'int_param1' (in-place):\")\n", - "pipeline.remove(\"int_param1\") # This modifies pipeline in-place\n", - "print(pipeline)" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "id": "efd7be91", - "metadata": {}, - "outputs": [], - "source": [ - "# Test the add method as well\n", - "print(\"Testing add method...\")\n", - "result_add = fresh_pipeline.add(neps.Float(0, 1), \"new_float\")\n", - "print(f\"Add returned same object? {result_add is fresh_pipeline}\")\n", - "\n", - "print(\"\\nAfter adding new_float:\")\n", - "print(fresh_pipeline)\n", - "\n", - "# Test method chaining\n", - "print(\"\\nTesting method chaining...\")\n", - "fresh_pipeline.remove(\"param_b\").add(neps.Categorical([\"x\", \"y\", \"z\"]), \"new_cat\")\n", - "print(\"After chaining remove + add:\")\n", - "print(fresh_pipeline)" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "84c7766b", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\Users\\Amega\\Git\\neps\\.venv\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param2 = Integer(1, 100, log, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tcategorical_param = Categorical(choices=('a', 'b', 'c'), prior=0, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tfloat_param = Float(0, 1.0, prior=0.5, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tfidelity_param = Fidelity(Integer(1, 10))\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "C:\\Users\\Amega\\Git\\neps\\neps\\space\\domain.py:287: UserWarning: Decoded value is above the upper bound of the domain. Clipping to the upper bound. 
This is likely due floating point precision in `torch.exp(x)` with torch.float64.\n", - " warnings.warn( # noqa: B028\n" - ] - } - ], - "source": [ - "import neps\n", - "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", - "from neps.space.neps_spaces import sampling\n", - "from neps.space.neps_spaces import neps_space\n", - "from functools import partial\n", - "from pprint import pprint\n", - "\n", - "# Define the NEPS space for the neural network architecture\n", - "class SimpleSpace(PipelineSpace):\n", - " int_param2 = neps.Integer(1,100, prior=50, log=True, prior_confidence=\"medium\")\n", - " categorical_param = Categorical((\"a\", \"b\", \"c\"), prior=0, prior_confidence=\"high\")\n", - " float_param = neps.Float(0, 1.0, prior=0.5, prior_confidence=\"high\")\n", - " fidelity_param = neps.Fidelity(neps.Integer(1, 10))\n", - "\n", - "old_space = neps.SearchSpace({\n", - " \"int_param2\": neps.HPOInteger(1,100, log=True, prior=50, prior_confidence=\"medium\"),\n", - " \"categorical_param\": neps.HPOCategorical([\"a\", \"b\", \"c\"], prior=\"a\", prior_confidence=\"high\"),\n", - " \"float_param\": neps.HPOFloat(0, 1.0, prior=0.5, prior_confidence=\"high\"),\n", - " \"fidelity_param\": neps.HPOInteger(1, 10,is_fidelity=True)\n", - "})\n", - "pipeline = SimpleSpace()\n", - "converted_space = neps.space.neps_spaces.neps_space.convert_classic_to_neps_search_space(old_space)\n", - "\n", - "# for name in converted_space.get_attrs().keys():\n", - "# param = converted_space.get_attrs()[name]\n", - "# print(\"\\n Converted\",name)\n", - "# if isinstance(param, neps.Fidelity):\n", - "# print(param._domain.get_attrs())\n", - "# else:\n", - "# pprint(param.get_attrs())\n", - "\n", - "# param = pipeline.get_attrs()[name]\n", - "# print(\"\\n Pipeline\",name)\n", - "# if isinstance(param, neps.Fidelity):\n", - "# print(param._domain.get_attrs())\n", - "# else:\n", - "# pprint(param.get_attrs())\n", - "\n", - "print(pipeline)\n" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "59280930", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:neps.api:Starting neps.run using root directory results/fidelity_ignore_test\n", - "WARNING:neps.optimizers.algorithms:Warning: No priors are defined in the search space, priorband will sample uniformly. Consider using hyperband instead.\n", - "INFO:neps.runtime:Overwriting optimization directory 'results\\fidelity_ignore_test' as `overwrite_optimization_dir=True`.\n", - "INFO:neps.runtime:Launching NePS\n", - "INFO:neps.runtime:Worker '20016-2025-07-11T18:31:10.131893+00:00' sampled new trial: 1_0.\n", - "INFO:neps.state.pipeline_eval:Successful evaluation of '1_0': 68.\n", - "INFO:neps.runtime:Worker '20016-2025-07-11T18:31:10.131893+00:00' evaluated trial: 1_0 as State.SUCCESS.\n", - "INFO:neps.runtime:The total number of evaluations has reached the maximum allowed of `self.settings.max_evaluations_total=1`. 
To allow more evaluations, increase this value or use a different stopping criterion.\n", - "INFO:neps.api:The post run summary has been created, which is a csv file with the output of all data in the run.\n", - "You can find a full dataframe at: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\results\\fidelity_ignore_test\\summary\\full.csv.\n", - "You can find a quick summary at: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\results\\fidelity_ignore_test\\summary\\short.csv.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "# Configs: 1\n", - "\n", - " success: 1\n", - "\n", - "# Best Found (config 1_0):\n", - "\n", - " objective_to_minimize: 68.0\n", - " config: int_param1\n", - " (68)\n", - " \t01 :: 68\n", - " config: fidelity_param\n", - " (1)\n", - " \t01 :: 1\n", - " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\basic_usage\\results\\fidelity_ignore_test\\configs\\config_1_0\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import torch\n", - "import torch.nn as nn\n", - "import neps\n", - "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", - "from neps.space.neps_spaces import sampling\n", - "from neps.space.neps_spaces import neps_space\n", - "from functools import partial\n", - "\n", - "# Define the NEPS space for the neural network architecture\n", - "class SimpleSpace(PipelineSpace):\n", - " int_param1 = neps.Integer(1,100)#, prior=50, prior_confidence=\"low\")\n", - " # int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\")\n", - " # int_param3 = neps.Integer(1,100, prior=50, prior_confidence=\"high\")\n", - " # int_param4 = neps.Integer(1,3, prior=2, prior_confidence=\"low\")\n", - " # categorical_param = Categorical((\"a\", \"b\", int_param1))\n", - " # float_param = neps.Float(0, 1.0, prior=0.5, prior_confidence=\"high\")\n", - " fidelity_param = neps.Fidelity(neps.Integer(1, 10))\n", - "\n", - "# Sampling and printing one 
random configuration of the pipeline\n", - "pipeline = SimpleSpace()\n", - "\n", - "def evaluate_pipeline(int_param1,**_args):#, fidelity_param, categorical_param):\n", - " return int_param1\n", - "\n", - "for i in range(1):\n", - " # resolved_pipeline, resolution_context = neps_space.resolve(pipeline,domain_sampler=sampler)\n", - " new_rs=neps.algorithms.NePSRandomSearch(pipeline,ignore_fidelity=True)\n", - " # old_rs=neps.algorithms.random_search(pipeline,ignore_fidelity=True)\n", - " # print(new_rs({},None))\n", - "\n", - " # s = resolved_pipeline.int_param1\n", - " # print(resolved_pipeline.get_attrs())\n", - " import logging\n", - "\n", - " logging.basicConfig(level=logging.INFO)\n", - " neps.run(evaluate_pipeline,pipeline,root_directory=\"results/fidelity_ignore_test\",overwrite_root_directory=True,optimizer=neps.algorithms.neps_priorband, evaluations_to_spend=1)\n", - " neps.status(\"results/fidelity_ignore_test\",print_summary=True, pipeline_space_variables=(SimpleSpace(),[\"int_param1\", \"fidelity_param\"]))" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "94af7ec4", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(,)\n", - "(,)\n" - ] - } - ], - "source": [ - "from neps.space.neps_spaces import parameters, sampling, neps_space\n", - "\n", - "class TestSpace(parameters.PipelineSpace):\n", - " cat_var = parameters.Categorical((\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\"))\n", - " cat_var_choice_2 = parameters.Categorical(\n", - " (\n", - " (\n", - " parameters.Resampled(cat_var),\n", - " ),\n", - " )\n", - " )\n", - " reresampled_var = parameters.Resampled(cat_var_choice_2)\n", - " reresampled_var2 = parameters.Resampled(cat_var_choice_2)\n", - "\n", - "random_sampler = sampling.RandomSampler({})\n", - "sampler = sampling.PriorOrFallbackSampler(fallback_sampler=random_sampler, always_use_prior=False)\n", - "\n", - "resolved_pipeline, resolution_context = 
neps_space.resolve(TestSpace(),domain_sampler=random_sampler)\n", - "print(resolved_pipeline.reresampled_var)\n", - "print(resolved_pipeline.reresampled_var2)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "neural-pipeline-search (3.13.1)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.1" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/neps_examples/efficiency/warmstarting.py b/neps_examples/efficiency/warmstarting.py deleted file mode 100644 index 6506cc8a2..000000000 --- a/neps_examples/efficiency/warmstarting.py +++ /dev/null @@ -1,96 +0,0 @@ -import neps -import logging -from neps import PipelineSpace, Integer, Float, Fidelity, Operation, Categorical -from neps.space.neps_spaces import neps_space - - -def operation(x): - """A simple operation that can be used in the pipeline.""" - return x - - -class SimpleSpace(PipelineSpace): - int_param = Integer(0, 10) - float_param = Float(0.0, 1.0) - epochs = Fidelity(Integer(1, 5)) - # cat_param = Categorical((float_param, int_param)) - # op = Operation(operation, args=(float_param, int_param)) - # op2 = Operation("Test") - # op3 = Operation("Test2", args=(int_param,)) - - -# Sampling a random configuration of the pipeline, which will be used for warmstarting -pipeline = SimpleSpace() -resolved_pipeline, resolution_context = neps_space.resolve( - pipeline, environment_values={"epochs": 5} -) -# for operator in (resolved_pipeline.op, resolved_pipeline.op2, resolved_pipeline.op3): -# print("Resolved Pipeline:", operator) -# if callable(operator): -# print("Callable:", neps_space.convert_operation_to_callable(operator).__str__()) -# print(neps_space.convert_operation_to_string(resolved_pipeline.op)) -# print( -# 
neps_space.config_string.ConfigString( -# neps_space.convert_operation_to_string(operator) -# ).pretty_format() -# ) -# print( -# neps_space.config_string.ConfigString( -# neps_space.convert_operation_to_string(operator) -# ) -# ) -# print( -# neps_space.config_string.ConfigString( -# neps_space.convert_operation_to_string(operator) -# ).unwrapped -# ) - - -def evaluate_pipeline(int_param, float_param, epochs=5, **kwargs) -> dict[str, float]: - return {"objective_to_minimize": -(int_param + float_param) * epochs, "cost": epochs} - - -wanted_config = resolution_context.samplings_made -wanted_env = resolution_context.environment_values -wanted_result = evaluate_pipeline(**resolved_pipeline.get_attrs()) -warmstarting_configs = [ - (wanted_config, wanted_env, wanted_result), - # (wanted_config, {"epochs": 2}, wanted_result), -] - -from functools import partial - -# Running the NEPS pipeline with warmstarting -logging.basicConfig(level=logging.INFO) -neps.warmstart_neps( - pipeline, - "results/warmstart_example/", - warmstarting_configs, - overwrite_root_directory=True, - optimizer=partial( - neps.algorithms.neps_random_search, - use_priors=True, - ignore_fidelity="highest fidelity", - ), -) -neps.run( - evaluate_pipeline=evaluate_pipeline, - pipeline_space=SimpleSpace(), - root_directory="results/warmstart_example/", - max_evaluations_per_run=5, - optimizer=partial( - neps.algorithms.neps_random_search, - use_priors=True, - ignore_fidelity="highest fidelity", - ), - # warmstart_configs=warmstarting_configs, - overwrite_root_directory=False, -) -neps.status( - "results/warmstart_example", - print_summary=True, - pipeline_space_variables=( - SimpleSpace(), - ["int_param", "float_param", "epochs"], # , "op", "op2", "op3", "cat_param"], - ), -) diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/neps_examples/test_files/test_neps_integration_priorband__max_cost.py similarity index 100% rename from 
tests/test_neps_space/test_neps_integration_priorband__max_cost.py rename to neps_examples/test_files/test_neps_integration_priorband__max_cost.py From 89f33463d42b0e4d276a8b7ed2cd059f874c327b Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 23 Oct 2025 14:45:24 +0200 Subject: [PATCH 089/156] feat: Refactor pipeline space definition into a class and add new integration tests for hyperparameter optimization with fidelity --- neps_examples/convenience/async_evaluation/submit.py | 9 ++++----- .../test_neps_integration_priorband__max_cost.py | 0 2 files changed, 4 insertions(+), 5 deletions(-) rename {neps_examples/test_files => tests/test_neps_space}/test_neps_integration_priorband__max_cost.py (100%) diff --git a/neps_examples/convenience/async_evaluation/submit.py b/neps_examples/convenience/async_evaluation/submit.py index f7021dee9..7bb9c6d4b 100644 --- a/neps_examples/convenience/async_evaluation/submit.py +++ b/neps_examples/convenience/async_evaluation/submit.py @@ -34,14 +34,13 @@ def evaluate_pipeline_via_slurm(pipeline_id, pipeline_directory, previous_pipeli return None -pipeline_space = dict( - optimizer=neps.Categorical(choices=["sgd", "adam"]), - lr=neps.Float(lower=10e-7, upper=10e-3, log=True), -) +class ExampleSpace(neps.PipelineSpace): + optimizer=neps.Categorical(choices=["sgd", "adam"]) + lr=neps.Float(lower=10e-7, upper=10e-3, log=True) neps.run( evaluate_pipeline=evaluate_pipeline_via_slurm, - pipeline_space=pipeline_space, + pipeline_space=ExampleSpace(), root_directory="results", max_evaluations_per_run=2, ) diff --git a/neps_examples/test_files/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py similarity index 100% rename from neps_examples/test_files/test_neps_integration_priorband__max_cost.py rename to tests/test_neps_space/test_neps_integration_priorband__max_cost.py From 0c519ce074c3c823c4a3151cbec9f17f9ce06eac Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 26 Oct 2025 
23:28:25 +0100 Subject: [PATCH 090/156] fix: update confidence level in MutatateUsingCentersSampler to MEDIUM for improved sampling accuracy --- neps/space/neps_spaces/sampling.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neps/space/neps_spaces/sampling.py b/neps/space/neps_spaces/sampling.py index b67465c99..fe6a86a47 100644 --- a/neps/space/neps_spaces/sampling.py +++ b/neps/space/neps_spaces/sampling.py @@ -405,7 +405,7 @@ def __call__( original_value = self._original_samplings_to_make[current_path] sampled_value = domain_obj.centered_around( center=original_value, - confidence=ConfidenceLevel.HIGH, + confidence=ConfidenceLevel.MEDIUM, ).sample() else: # We never had a value for this path, we can only sample from the domain. From d83040df5c420ae028094919859073f4eafe228e Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 26 Oct 2025 23:28:50 +0100 Subject: [PATCH 091/156] fix: improve formatting in wrap_config_into_string for better readability --- neps/space/neps_spaces/config_string.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/neps/space/neps_spaces/config_string.py b/neps/space/neps_spaces/config_string.py index 395820d78..7cb7a4bfe 100644 --- a/neps/space/neps_spaces/config_string.py +++ b/neps/space/neps_spaces/config_string.py @@ -128,29 +128,28 @@ def wrap_config_into_string( if item.level > current_level: if item.hyperparameters not in ("{}", ""): value = ( - " (" + "(" + str( item.operator.__name__ if callable(item.operator) else item.operator ) - + " " + item.hyperparameters ) else: - value = " (" + str( + value = "(" + str( item.operator.__name__ if callable(item.operator) else item.operator ) elif item.level < current_level: value = ( - ")" * (current_level - item.level + 1) - + " (" + ")" * (current_level - item.level) + + ", " + str( item.operator.__name__ if callable(item.operator) else item.operator ) ) else: - value = ") (" + str( + value = ", " + str( item.operator.__name__ if 
callable(item.operator) else item.operator ) current_level = item.level @@ -158,6 +157,8 @@ def wrap_config_into_string( result.append(")" * current_level) result_string = "".join(result).strip() + if result_string.startswith("("): + result_string = result_string[1:-1].strip() # A workaround needed since in the existing configurations # generated by previous methods, e.g. the `resBlock resBlock` and `resBlock` items From 5496fd8844a40155c26e15ef5dcc55389793c849 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 28 Oct 2025 22:42:38 +0100 Subject: [PATCH 092/156] Refactor config string handling and enhance tests - Improved the `unwrap_config_string` and `wrap_config_into_string` functions to better handle nested operations and hyperparameters. - Added comprehensive unit tests for the `config_string` module, ensuring round-trip preservation of configuration strings. - Updated the `SamplingResolutionContext` class to simplify object resolution logic and remove unnecessary path tracking. - Refactored the `Fidelity`, `PipelineSpace`, and `Operation` classes to enhance clarity and maintainability. - Adjusted grammar-like test cases to reflect the new configuration string format. 
--- neps/runtime.py | 2 +- neps/space/neps_spaces/config_string.py | 159 +++++++--- neps/space/neps_spaces/neps_space.py | 272 +++++++---------- neps/space/neps_spaces/parameters.py | 134 +++----- tests/test_neps_space/test_config_string.py | 287 ++++++++++++++++++ .../test_search_space__grammar_like.py | 34 +-- 6 files changed, 570 insertions(+), 318 deletions(-) create mode 100644 tests/test_neps_space/test_config_string.py diff --git a/neps/runtime.py b/neps/runtime.py index 091a050ea..f0473a2e1 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -588,7 +588,7 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 logger.info( "Summary files can be found in the “summary” folder inside" - "the root directory: %s", + " the root directory: %s", summary_dir, ) diff --git a/neps/space/neps_spaces/config_string.py b/neps/space/neps_spaces/config_string.py index 7cb7a4bfe..9b12507ee 100644 --- a/neps/space/neps_spaces/config_string.py +++ b/neps/space/neps_spaces/config_string.py @@ -49,6 +49,19 @@ def unwrap_config_string(config_string: str) -> tuple[UnwrappedConfigStringPart, A tuple of `UnwrappedConfigStringPart` objects representing the unwrapped configuration string. """ + # Handle simple strings with no parentheses + if "(" not in config_string: + # This is a simple operator name with no nested structure + # Create a single unwrapped part for it + item = UnwrappedConfigStringPart( + level=1, + opening_index=0, + operator=config_string.strip(), + hyperparameters="{}", + operands="", + ) + return (item,) + # A workaround needed since in the existing configurations # generated by previous methods, e.g. the `resBlock resBlock` and `resBlock` items # occur without wrapping parenthesis, differently from other items. 
@@ -79,13 +92,25 @@ def unwrap_config_string(config_string: str) -> tuple[UnwrappedConfigStringPart, start_char_index, opening_index = stack.pop() level = len(stack) + 1 # start level counting from 1 and not 0 - value_single = config_string[start_char_index + 1 : current_char_index] - value = value_single.split(" (", maxsplit=1) - operator = value[0] - operands = "(" + value[1] if len(value) > 1 else "" + # Extract the content between the matching parentheses + content_inside_parens = config_string[ + start_char_index + 1 : current_char_index + ] + + # Find the operator name by looking backwards from the opening parenthesis + # to find where the operator name starts (either start of string, or after a + # comma/space) + operator_start = start_char_index - 1 + while operator_start >= 0 and config_string[operator_start] not in ",(": + operator_start -= 1 + operator_start += 1 # Move forward to the first character of the operator + + operator = config_string[operator_start:start_char_index] + operands = content_inside_parens + # Handle hyperparameters enclosed in curly braces if " {" in operator: - operator, hyperparameters = operator.split(" {") + operator, hyperparameters = operator.split(" {", maxsplit=1) hyperparameters = "{" + hyperparameters else: hyperparameters = "{}" @@ -105,7 +130,7 @@ def unwrap_config_string(config_string: str) -> tuple[UnwrappedConfigStringPart, # Current profiling shows this function does not run that often # so no need for caching -def wrap_config_into_string( +def wrap_config_into_string( # noqa: C901, PLR0915 unwrapped_config: tuple[UnwrappedConfigStringPart, ...], max_level: int | None = None, ) -> str: @@ -119,46 +144,92 @@ def wrap_config_into_string( Returns: The string representation of the unwrapped config. 
""" - result = [] - current_level = 0 + if not unwrapped_config: + return "" + + # Build a tree structure from the unwrapped parts + # Group children by their parent's (level, opening_index) + children_by_parent: dict[tuple[int, int], list[UnwrappedConfigStringPart]] = {} for item in unwrapped_config: if max_level is not None and item.level > max_level: continue + parent_key = ( + (item.level - 1, item.opening_index - 1) if item.level > 1 else (0, -1) + ) + if parent_key not in children_by_parent: + children_by_parent[parent_key] = [] + children_by_parent[parent_key].append(item) + + def reconstruct(item: UnwrappedConfigStringPart) -> str: # noqa: C901, PLR0912 + """Recursively reconstruct the config string for an item.""" + operator_name = ( + item.operator.__name__ if callable(item.operator) else item.operator + ) - if item.level > current_level: - if item.hyperparameters not in ("{}", ""): - value = ( - "(" - + str( - item.operator.__name__ - if callable(item.operator) - else item.operator - ) - + item.hyperparameters - ) + # Get children of this item + item_key = (item.level, item.opening_index) + children = children_by_parent.get(item_key, []) + + # Reconstruct operands + if not item.operands and not children: + # No operands at all - just return the operator name + return operator_name + if not children: + # Leaf node - just wrap the operands + return f"{operator_name}({item.operands})" + # Has children - need to mix nested and non-nested operands + # Parse operands to separate nested from non-nested + parts = [] + if item.operands: + current_part = [] # type: ignore[var-annotated] + paren_depth = 0 + for char in item.operands: + if char == "(": + paren_depth += 1 + elif char == ")": + paren_depth -= 1 + elif char == "," and paren_depth == 0: + parts.append("".join(current_part).strip()) + current_part = [] + continue + current_part.append(char) + if current_part: + parts.append("".join(current_part).strip()) + + # Reconstruct each part - nested ones 
recursively, non-nested as-is + reconstructed_parts = [] + child_idx = 0 + for part in parts: + if "(" in part: + # This is a nested structure - should have a corresponding child + if child_idx < len(children): + reconstructed_parts.append(reconstruct(children[child_idx])) + child_idx += 1 + else: + # Shouldn't happen, but fallback + reconstructed_parts.append(part) else: - value = "(" + str( - item.operator.__name__ if callable(item.operator) else item.operator - ) - elif item.level < current_level: - value = ( - ")" * (current_level - item.level) - + ", " - + str( - item.operator.__name__ if callable(item.operator) else item.operator - ) - ) - else: - value = ", " + str( - item.operator.__name__ if callable(item.operator) else item.operator - ) - current_level = item.level - result.append(value) - result.append(")" * current_level) + # Non-nested operand - just add it + reconstructed_parts.append(part) + + # Add any remaining children that weren't referenced in operands + while child_idx < len(children): + reconstructed_parts.append(reconstruct(children[child_idx])) + child_idx += 1 + + return f"{operator_name}({', '.join(reconstructed_parts)})" + + # Start reconstruction from the root (level 1 items) + root_items = [item for item in unwrapped_config if item.level == 1] + if not root_items: + return "" - result_string = "".join(result).strip() - if result_string.startswith("("): - result_string = result_string[1:-1].strip() + # If there's only one root, reconstruct it + if len(root_items) == 1: + result = reconstruct(root_items[0]) + else: + # Multiple roots - join with commas + result = ", ".join(reconstruct(item) for item in root_items) # A workaround needed since in the existing configurations # generated by previous methods, e.g. 
the `resBlock resBlock` and `resBlock` items @@ -171,11 +242,11 @@ def wrap_config_into_string( ("id", False), ] for op, replace_individual in replacements: - result_string = result_string.replace(f"({op} {op})", "__TMP_PLACEHOLDER___") + result = result.replace(f"({op} {op})", "__TMP_PLACEHOLDER___") if replace_individual: - result_string = result_string.replace(f"({op})", f"{op}") - result_string = result_string.replace("__TMP_PLACEHOLDER___", f"{op} {op}") - return result_string + result = result.replace(f"({op})", f"{op}") + result = result.replace("__TMP_PLACEHOLDER___", f"{op} {op}") + return result class ConfigString: diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index ebee6c7cb..c608a8932 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -116,10 +116,6 @@ def __init__( # `_resolved_objects` stores the intermediate values to make re-use possible. self._resolved_objects: dict[Any, Any] = {} - # `_sampled_domains` tracks domain objects that have been sampled from by - # (id, path) key - self._sampled_domains: set[tuple[int, str]] = set() - # `_current_path_parts` stores the current path we are resolving. self._current_path_parts: list[str] = [] @@ -185,12 +181,7 @@ def was_already_resolved(self, obj: Any) -> bool: Returns: True if the object was already resolved, False otherwise. """ - try: - # Try to use the object itself as a key (works for hashable objects) - return obj in self._resolved_objects - except TypeError: - # If the object is not hashable, fall back to using its id - return id(obj) in self._resolved_objects + return obj in self._resolved_objects def add_resolved(self, original: Any, resolved: Any) -> None: """Add a resolved object to the context. @@ -214,12 +205,7 @@ def add_resolved(self, original: Any, resolved: Any) -> None: raise ValueError( f"Attempting to add a Resampled object to resolved values: {original!r}." 
) - try: - # Try to use the object itself as a key (works for hashable objects) - self._resolved_objects[original] = resolved - except TypeError: - # If the object is not hashable, fall back to using its id - self._resolved_objects[id(original)] = resolved + self._resolved_objects[original] = resolved def get_resolved(self, obj: Any) -> Any: """Get the resolved value for the given object. @@ -234,23 +220,11 @@ def get_resolved(self, obj: Any) -> Any: ValueError: If the object was not already resolved in the context. """ try: - # Try to use the object itself as a key (works for hashable objects) return self._resolved_objects[obj] - except (KeyError, TypeError) as err: - if isinstance(err, TypeError): - # If the object is not hashable, try using its id - try: - return self._resolved_objects[id(obj)] - except KeyError as id_err: - raise ValueError( - "Given object was not already resolved. Please check first:" - f" {obj!r}" - ) from id_err - else: - # KeyError - object wasn't found - raise ValueError( - f"Given object was not already resolved. Please check first: {obj!r}" - ) from err + except KeyError as err: + raise ValueError( + f"Given object was not already resolved. Please check first: {obj!r}" + ) from err def sample_from(self, domain_obj: Domain) -> Any: """Sample a value from the given domain object. @@ -265,6 +239,18 @@ def sample_from(self, domain_obj: Domain) -> Any: ValueError: If the domain object was already resolved or if the path has already been sampled from. """ + # Each `domain_obj` is only ever sampled from once. + # This is okay and the expected behavior. + # For each `domain_obj`, its sampled value is either directly stored itself, + # or is used in some other Resolvable. + # In both cases that sampled value is cached for later uses, + # and so the `domain_obj` will not be re-sampled from again. + if self.was_already_resolved(domain_obj): + raise ValueError( + "We have already sampled a value for the given domain object:" + f" {domain_obj!r}." 
+ "\nThis should not be happening." + ) + # The range compatibility identifier is there to make sure when we say # the path matches, that the range for the value we are looking up also matches. domain_obj_type_name = type(domain_obj).__name__.lower() @@ -284,25 +270,12 @@ def sample_from(self, domain_obj: Domain) -> Any: + "\nThis should not be happening." ) - # For domain object tracking, create a key that includes both domain ID and path - # context - # This allows the same domain to be sampled in different resampled contexts - domain_path_key = (id(domain_obj), current_path) - - if domain_path_key in self._sampled_domains: - raise ValueError( - "We have already sampled a value for the given domain object:" - f" {domain_obj!r} at path {current_path!r}." - + "\nThis should not be happening." - ) - sampled_value = self._domain_sampler( domain_obj=domain_obj, current_path=current_path, ) self._samplings_made[current_path] = sampled_value - self._sampled_domains.add(domain_path_key) return self._samplings_made[current_path] def get_value_from_environment(self, var_name: str) -> Any: @@ -324,54 +297,6 @@ def get_value_from_environment(self, var_name: str) -> Any: f"No value is available for the environment variable {var_name!r}." ) from err - def was_already_resolved_with_path(self, obj: Any) -> bool: - """Check if the given object was already resolved with current path. - - Args: - obj: The object to check if it was already resolved. - - Returns: - True if the object was already resolved with current path, False otherwise. - """ - current_path = ".".join(self._current_path_parts) - # Use object identity (id) instead of equality to ensure different instances - # are treated separately even if they're equal - cache_key = (id(obj), current_path) - return cache_key in self._resolved_objects - - def get_resolved_with_path(self, obj: Any) -> Any: - """Get the resolved value for the given object with current path. 
- - Args: - obj: The object for which to get the resolved value. - - Returns: - The resolved value of the object. - - Raises: - ValueError: If the object was not already resolved with current path. - """ - current_path = ".".join(self._current_path_parts) - cache_key = (id(obj), current_path) - try: - return self._resolved_objects[cache_key] - except KeyError as err: - raise ValueError( - f"Given object was not already resolved with path {current_path!r}:" - f" {obj!r}" - ) from err - - def add_resolved_with_path(self, original: Any, resolved: Any) -> None: - """Add a resolved object to the context with current path. - - Args: - original: The original object that was resolved. - resolved: The resolved value of the original object. - """ - current_path = ".".join(self._current_path_parts) - cache_key = (id(original), current_path) - self._resolved_objects[cache_key] = resolved - class SamplingResolver: """A class responsible for resolving samplings in a NePS space. @@ -461,20 +386,9 @@ def _( domain_obj: Domain, context: SamplingResolutionContext, ) -> Any: - # Check if we're in a resampled context (path contains "resampled_") - current_path = ".".join(context._current_path_parts) - is_resampled_context = "resampled_" in current_path - - # Always check object-identity cache first for shared objects if context.was_already_resolved(domain_obj): return context.get_resolved(domain_obj) - # In non-resampled contexts, also check path-specific cache - if not is_resampled_context and context.was_already_resolved_with_path( - domain_obj - ): - return context.get_resolved_with_path(domain_obj) - initial_attrs = domain_obj.get_attrs() final_attrs = {} needed_resolving = False @@ -496,16 +410,7 @@ def _( raise ValueError(f"Failed to sample from {resolved_domain_obj!r}.") from e result = self._resolve(sampled_value, "sampled_value", context) - # Cache the result - if not is_resampled_context: - # In normal contexts, cache with both object identity and path - 
context.add_resolved(domain_obj, result) - context.add_resolved_with_path(domain_obj, result) - elif not context.was_already_resolved(domain_obj): - # In resampled contexts, cache shared objects for reuse across contexts - # but skip path-based caching to prevent unwanted dependencies - context.add_resolved(domain_obj, result) - + context.add_resolved(domain_obj, result) return result @_resolver_dispatch.register @@ -631,6 +536,17 @@ def _( for attr_name, initial_attr_value in initial_attrs.items(): resolved_attr_value = self._resolve(initial_attr_value, attr_name, context) + + # Special handling for 'args': if it was a Resolvable that resolved to a + # non-iterable, wrap it in a tuple since Operation expects args to be a + # sequence + if ( + attr_name == "args" + and isinstance(initial_attr_value, Resolvable) + and not isinstance(resolved_attr_value, tuple | list | Resolvable) + ): + resolved_attr_value = (resolved_attr_value,) + final_attrs[attr_name] = resolved_attr_value needed_resolving = needed_resolving or ( initial_attr_value is not resolved_attr_value @@ -929,9 +845,7 @@ def convert_operation_to_callable(operation: Operation) -> Callable: operator = cast(Callable, operation.operator) operation_args: list[Any] = [] - for arg in ( - operation.args if isinstance(operation.args, tuple | list) else (operation.args,) - ): + for arg in operation.args: if isinstance(arg, tuple | list): arg_sequence: list[Any] = [] for a in arg: @@ -967,73 +881,117 @@ def convert_operation_to_callable(operation: Operation) -> Callable: if isinstance(kwarg_value, Operation) else kwarg_value ) + return cast(Callable, operator(*operation_args, **operation_kwargs)) +def _serialize_operation(operation: Operation | str | Callable) -> str: + """Serialize an operation to its string representation. + + This is a helper function to convert Operation objects to strings + for inclusion in the operands field of UnwrappedConfigStringPart. 
+ """ + if isinstance(operation, str): + return operation + + # Handle non-Operation objects (e.g., resolved PyTorch modules, integers, etc.) + if not isinstance(operation, Operation): + return str(operation) + + # For Operation objects, build the string representation + operator_name = ( + operation.operator + if isinstance(operation.operator, str) + else operation.operator.__name__ + ) + + if not operation.args: + # No operands - just return operator name + return operator_name + + # Recursively serialize operands + operand_strs = [_serialize_operation(arg) for arg in operation.args] + return f"{operator_name}({', '.join(operand_strs)})" + + def _operation_to_unwrapped_config( - operation: Operation | str | Resolvable, + operation: Operation | str, level: int = 1, -) -> list[config_string.UnwrappedConfigStringPart]: + opening_index: int = 0, +) -> tuple[list[config_string.UnwrappedConfigStringPart], int]: + """Convert an Operation to unwrapped config parts. + + Returns: + A tuple of (list of parts, next available opening_index) + """ result = [] if isinstance(operation, Operation): operator = operation.operator kwargs = str(operation.kwargs) + + # Build operands string and collect child parts + operand_strs = [] + all_child_parts = [] + next_opening = opening_index + 1 + + for operand in operation.args: + if isinstance(operand, Operation): + # Only create child parts if the operation has operands + # (otherwise it's just a simple name like "ReLU") + if operand.args: + # Recursively get unwrapped parts for the nested operation + child_parts, next_opening = _operation_to_unwrapped_config( + operand, level + 1, next_opening + ) + all_child_parts.extend(child_parts) + # Serialize this operand to a string for the operands field + operand_strs.append(_serialize_operation(operand)) + else: + operand_strs.append(str(operand)) + + # Create operands string + operands_str = ", ".join(operand_strs) + item = config_string.UnwrappedConfigStringPart( level=level, - 
opening_index=-1, + opening_index=opening_index, operator=operator, hyperparameters=kwargs, - operands="", - ) - result.append(item) - - # Handle args that might be a Resolvable or an iterable - args = operation.args - if isinstance(args, Resolvable): - # If args is a Resolvable, treat it as a single operand - result.extend(_operation_to_unwrapped_config(args, level + 1)) - else: - # If args is iterable (tuple/list), iterate over each operand - for operand in args: - result.extend(_operation_to_unwrapped_config(operand, level + 1)) - elif isinstance(operation, Resolvable): - # Handle other Resolvable types that are not Operations - item = config_string.UnwrappedConfigStringPart( - level=level, - opening_index=-1, - operator=str(operation), # Convert to string for display - hyperparameters="", - operands="", - ) - result.append(item) - else: - # Handle string operations - item = config_string.UnwrappedConfigStringPart( - level=level, - opening_index=-1, - operator=operation, - hyperparameters="", - operands="", + operands=operands_str, ) result.append(item) - return result + result.extend(all_child_parts) + + return result, next_opening + item = config_string.UnwrappedConfigStringPart( + level=level, + opening_index=opening_index, + operator=operation, + hyperparameters="", + operands="", + ) + return [item], opening_index + 1 -def convert_operation_to_string(operation: Operation) -> str: +def convert_operation_to_string(operation: Operation | str | int | float) -> str: """Convert an Operation to a string representation. Args: - operation: The Operation to convert. + operation: The Operation to convert, or a primitive value. Returns: - A string representation of the operation. + A string representation of the operation or value. Raises: ValueError: If the operation is not a valid Operation object. 
""" - unwrapped_config = tuple(_operation_to_unwrapped_config(operation)) - return config_string.wrap_config_into_string(unwrapped_config) + # Handle non-Operation values (resolved primitives) + if not isinstance(operation, Operation): + return str(operation) + + unwrapped_config, _ = _operation_to_unwrapped_config(operation) + return config_string.wrap_config_into_string(tuple(unwrapped_config)) # ------------------------------------------------- diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 2a98cf24c..3a1f5c31f 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -110,7 +110,7 @@ def __str__(self) -> str: """Get a string representation of the fidelity.""" return f"Fidelity({self._domain.__str__()})" - def is_equivalent_to(self, other: object) -> bool: + def compare_domain_to(self, other: object) -> bool: """Check if this fidelity parameter is equivalent to another. This method provides comparison logic without interfering with Python's @@ -127,24 +127,12 @@ def is_equivalent_to(self, other: object) -> bool: return False return self._domain == other._domain - def __eq__(self, other: object) -> bool: - """Check if this is the exact same object instance. - - This uses object identity to avoid interfering with the resolution caching system. - """ - return self is other - - def __hash__(self) -> int: - """Get hash based on object identity.""" - return id(self) - @property def min_value(self) -> int | float: """Get the minimum value of the fidelity domain. Returns: The minimum value of the fidelity domain. 
- """ return self._domain.min_value @@ -213,7 +201,6 @@ def get_attrs(self) -> Mapping[str, Any]: for attr_name, attr_value in vars(self.__class__).items(): if attr_name.startswith("_") or callable(attr_value) or attr_value is None: continue - # Skip if this parameter has been marked as removed attrs[attr_name] = attr_value for attr_name, attr_value in vars(self).items(): @@ -259,23 +246,15 @@ def __str__(self) -> str: ) return f"PipelineSpace {self.__class__.__name__} with parameters:\n\t{attrs}" - def __add__( + def add( self, - other: ( - Integer - | Float - | Categorical - | Operation - | Resampled - | Repeated - | PipelineSpace - ), + new_param: Integer | Float | Categorical | Operation | Resampled | Repeated, name: str | None = None, ) -> PipelineSpace: """Add a new parameter to the pipeline. Args: - other: The parameter to be added, which can be an Integer, Float, + new_param: The parameter to be added, which can be an Integer, Float, Categorical, Operation, Resampled, Repeated, or PipelineSpace. name: The name of the parameter to be added. If None, a default name will be generated. @@ -287,18 +266,18 @@ def __add__( ValueError: If the parameter is not of a supported type or if a parameter with the same name already exists in the pipeline. """ - if isinstance(other, PipelineSpace): + if isinstance(new_param, PipelineSpace): new_space = self - for exist_name, value in other.get_attrs().items(): - new_space = new_space.__add__(value, exist_name) + for exist_name, value in new_param.get_attrs().items(): + new_space = new_space.add(value, exist_name) return new_space if not isinstance( - other, Integer | Float | Categorical | Operation | Resampled | Repeated + new_param, Integer | Float | Categorical | Operation | Resampled | Repeated ): raise ValueError( "Can only add Integer, Float, Categorical, Operation, Resampled," - f" Repeated or PipelineSpace, got {other!r}." + f" Repeated or PipelineSpace, got {new_param!r}." 
) param_name = name if name else f"param_{len(self.get_attrs()) + 1}" @@ -310,35 +289,19 @@ class NewSpace(PipelineSpace): new_pipeline = NewSpace() for exist_name, value in self.get_attrs().items(): setattr(new_pipeline, exist_name, value) - if exist_name == param_name and not _parameters_are_equivalent(value, other): + if exist_name == param_name and not _parameters_are_equivalent( + value, new_param + ): raise ValueError( f"A different parameter with the name {param_name!r} already exists" " in the pipeline:\n" f" {value}\n" - f" {other}" + f" {new_param}" ) if not hasattr(new_pipeline, param_name): - setattr(new_pipeline, param_name, other) + setattr(new_pipeline, param_name, new_param) return new_pipeline - def add( - self, - new_param: Integer | Float | Categorical | Operation | Resampled | Repeated, - name: str | None = None, - ) -> PipelineSpace: - """Add a new parameter to the pipeline. This is NOT an in-place operation. - - Args: - new_param: The parameter to be added, which can be an Integer, Float, - Categorical, Operation, Resampled, or Repeated domain. - name: The name of the parameter to be added. If None, a default name will - be generated. - - Returns: - A NEW PipelineSpace with the added parameter. - """ - return self.__add__(new_param, name) - def remove(self, name: str) -> PipelineSpace: """Remove a parameter from the pipeline by its name. This is NOT an in-place operation. @@ -679,7 +642,7 @@ def __str__(self) -> str: string += ")" return string - def is_equivalent_to(self, other: object) -> bool: + def compare_domain_to(self, other: object) -> bool: """Check if this categorical parameter is equivalent to another. This method provides comparison logic without interfering with Python's @@ -700,17 +663,6 @@ def is_equivalent_to(self, other: object) -> bool: and self.choices == other.choices ) - def __eq__(self, other: object) -> bool: - """Check if this is the exact same object instance. 
- - This uses object identity to avoid interfering with the resolution caching system. - """ - return self is other - - def __hash__(self) -> int: - """Get hash based on object identity.""" - return id(self) - @property def min_value(self) -> int: """Get the minimum value of the categorical domain. @@ -897,7 +849,7 @@ def __str__(self) -> str: string += ")" return string - def is_equivalent_to(self, other: object) -> bool: + def compare_domain_to(self, other: object) -> bool: """Check if this float parameter is equivalent to another. This method provides comparison logic without interfering with Python's @@ -920,17 +872,6 @@ def is_equivalent_to(self, other: object) -> bool: and self._log == other._log ) - def __eq__(self, other: object) -> bool: - """Check if this is the exact same object instance. - - This uses object identity to avoid interfering with the resolution caching system. - """ - return self is other - - def __hash__(self) -> int: - """Get hash based on object identity.""" - return id(self) - @property def min_value(self) -> float: """Get the minimum value of the floating-point domain. @@ -1115,7 +1056,7 @@ def __str__(self) -> str: string += ")" return string - def is_equivalent_to(self, other: object) -> bool: + def compare_domain_to(self, other: object) -> bool: """Check if this integer parameter is equivalent to another. This method provides comparison logic without interfering with Python's @@ -1138,17 +1079,6 @@ def is_equivalent_to(self, other: object) -> bool: and self._log == other._log ) - def __eq__(self, other: object) -> bool: - """Check if this is the exact same object instance. - - This uses object identity to avoid interfering with the resolution caching system. - """ - return self is other - - def __hash__(self) -> int: - """Get hash based on object identity.""" - return id(self) - @property def min_value(self) -> int: """Get the minimum value of the integer domain. 
@@ -1325,7 +1255,7 @@ def __str__(self) -> str: f" kwargs={self._kwargs!s})" ) - def is_equivalent_to(self, other: object) -> bool: + def compare_domain_to(self, other: object) -> bool: """Check if this operation parameter is equivalent to another. This method provides comparison logic without interfering with Python's @@ -1346,17 +1276,6 @@ def is_equivalent_to(self, other: object) -> bool: and self.kwargs == other.kwargs ) - def __eq__(self, other: object) -> bool: - """Check if this is the exact same object instance. - - This uses object identity to avoid interfering with the resolution caching system. - """ - return self is other - - def __hash__(self) -> int: - """Get hash based on object identity.""" - return id(self) - @property def operator(self) -> Callable | str: """Get the operator of the operation. @@ -1520,6 +1439,23 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: ) return self._source.from_attrs(attrs) + def compare_domain_to(self, other: object) -> bool: + """Check if this resampled parameter is equivalent to another. + + This method provides comparison logic without interfering with Python's + object identity system (unlike __eq__). Use this for functional comparisons + like checking if parameters have the same configuration. + + Args: + other: The object to compare with. + + Returns: + True if the objects are equivalent, False otherwise. 
+ """ + if not isinstance(other, Resampled): + return False + return self.source == other.source + class Repeated(Resolvable): """A class representing a sequence where a resolvable diff --git a/tests/test_neps_space/test_config_string.py b/tests/test_neps_space/test_config_string.py new file mode 100644 index 000000000..5c3bf5f1a --- /dev/null +++ b/tests/test_neps_space/test_config_string.py @@ -0,0 +1,287 @@ +"""Tests for config_string module functions.""" + +from __future__ import annotations + +import pytest + +from neps.space.neps_spaces.config_string import ( + ConfigString, + UnwrappedConfigStringPart, + unwrap_config_string, + wrap_config_into_string, +) + + +class TestUnwrapAndWrapConfigString: + """Test the unwrap_config_string and wrap_config_into_string functions. + + The new implementation preserves the structure during round-trip unwrap->wrap. + """ + + def test_single_nested_operation(self): + """Test unwrapping and wrapping a single nested operation.""" + config_str = "Sequential(ReLU)" + unwrapped = unwrap_config_string(config_str) + wrapped = wrap_config_into_string(unwrapped) + # Round trip should preserve the input + assert wrapped == config_str + + def test_operation_with_multiple_args(self): + """Test unwrapping and wrapping an operation with multiple arguments.""" + # New format uses commas: Sequential(ReLU, Conv2D, BatchNorm) + config_str = "Sequential(ReLU, Conv2D, BatchNorm)" + unwrapped = unwrap_config_string(config_str) + wrapped = wrap_config_into_string(unwrapped) + assert wrapped == config_str + + def test_nested_operations(self): + """Test unwrapping and wrapping nested operations.""" + # New format: Sequential(Sequential(ReLU), Conv2D) + config_str = "Sequential(Sequential(ReLU), Conv2D)" + unwrapped = unwrap_config_string(config_str) + wrapped = wrap_config_into_string(unwrapped) + assert wrapped == config_str + + def test_deeply_nested_operations(self): + """Test unwrapping and wrapping deeply nested operations.""" + # New format: 
Sequential(Sequential(Sequential(ReLU))) + config_str = "Sequential(Sequential(Sequential(ReLU)))" + unwrapped = unwrap_config_string(config_str) + wrapped = wrap_config_into_string(unwrapped) + assert wrapped == config_str + + def test_complex_nested_structure(self): + """Test unwrapping and wrapping a complex nested structure.""" + # New format with multiple levels and operands + config_str = ( + "Sequential(Sequential(ReLU, Conv2D), BatchNorm, Sequential(Dropout))" + ) + unwrapped = unwrap_config_string(config_str) + wrapped = wrap_config_into_string(unwrapped) + assert wrapped == config_str + + def test_round_trip_preservation(self): + """Test that unwrap->wrap round trip preserves the input.""" + test_cases = [ + "Sequential(ReLU)", + "Sequential(ReLU, Conv2D)", + "Sequential(Sequential(ReLU), Conv2D)", + "Sequential(Sequential(Sequential(ReLUConvBN)))", + ] + for config_str in test_cases: + unwrapped = unwrap_config_string(config_str) + wrapped = wrap_config_into_string(unwrapped) + assert wrapped == config_str, f"Round trip failed for: {config_str}" + + def test_operation_with_hyperparameters(self): + """Test that operations with hyperparameters can be unwrapped.""" + # Hyperparameters should be the first element inside the parentheses + config_str = "Conv2D({kernel_size: 3}, input)" + unwrapped = unwrap_config_string(config_str) + wrapped = wrap_config_into_string(unwrapped) + # Round trip should preserve the structure + assert wrapped == config_str + + def test_nested_operation_with_hyperparameters(self): + """Test unwrapping and wrapping nested operations with hyperparameters.""" + config_str = "Sequential(Conv2D({kernel_size: 3}, input), ReLU)" + unwrapped = unwrap_config_string(config_str) + wrapped = wrap_config_into_string(unwrapped) + # Should preserve the structure + assert wrapped == config_str + + def test_resblock_special_case(self): + """Test the special case handling of 'resBlock resBlock'.""" + # The special handling for resBlock + config_str 
= "resBlock resBlock" + unwrapped = unwrap_config_string(config_str) + wrapped = wrap_config_into_string(unwrapped) + assert wrapped == config_str + + def test_unwrapped_structure(self): + """Test the structure of unwrapped config parts.""" + config_str = "Sequential(ReLU, Conv2D)" + unwrapped = unwrap_config_string(config_str) + + # Should have 1 part: Sequential at level 1 with operands "ReLU, Conv2D" + assert len(unwrapped) == 1 + + # First part is Sequential + assert unwrapped[0].operator == "Sequential" + assert unwrapped[0].level == 1 + assert unwrapped[0].opening_index == 0 + assert unwrapped[0].operands == "ReLU, Conv2D" + + def test_grammar_like_example(self): + """Test a realistic example from grammar-like search spaces.""" + # From the actual test expectations + config_str = ( + "Sequential(Sequential(Sequential(ReLUConvBN)), Sequential(ReLUConvBN)," + " Identity)" + ) + unwrapped = unwrap_config_string(config_str) + wrapped = wrap_config_into_string(unwrapped) + assert wrapped == config_str + + +class TestConfigStringClass: + """Test the ConfigString class.""" + + def test_initialization(self): + """Test ConfigString initialization.""" + config_str = "Sequential(ReLU, Conv2D)" + cs = ConfigString(config_str) + assert cs.config_string == config_str + + def test_invalid_initialization(self): + """Test that ConfigString raises error for invalid input.""" + with pytest.raises(ValueError): + ConfigString("") + with pytest.raises((ValueError, TypeError)): + ConfigString(None) # type: ignore[arg-type] + + def test_unwrapped_property(self): + """Test the unwrapped property.""" + config_str = "Sequential(ReLU, Conv2D)" + cs = ConfigString(config_str) + unwrapped = cs.unwrapped + assert isinstance(unwrapped, tuple) + assert all(isinstance(part, UnwrappedConfigStringPart) for part in unwrapped) + + def test_max_hierarchy_level(self): + """Test max_hierarchy_level property.""" + config_str = "Sequential(Sequential(Sequential(ReLU)))" + cs = ConfigString(config_str) 
+ # Sequential at level 1, Sequential at level 2, Sequential at level 3 + # ReLU doesn't have parentheses so it doesn't create its own unwrapped part + assert cs.max_hierarchy_level == 3 + + def test_at_hierarchy_level(self): + """Test at_hierarchy_level method.""" + config_str = "Sequential(Sequential(ReLU, Conv2D), BatchNorm)" + cs = ConfigString(config_str) + + # Get config at level 1 (just the outermost Sequential) + level_1 = cs.at_hierarchy_level(1) + assert "Sequential" in level_1.config_string + + # Get config at level 2 + level_2 = cs.at_hierarchy_level(2) + assert isinstance(level_2, ConfigString) + + def test_equality(self): + """Test ConfigString equality.""" + cs1 = ConfigString("Sequential(ReLU)") + cs2 = ConfigString("Sequential(ReLU)") + cs3 = ConfigString("Sequential(Conv2D)") + + assert cs1 == cs2 + assert cs1 != cs3 + assert cs2 != cs3 + + def test_hash(self): + """Test ConfigString hashing.""" + cs1 = ConfigString("Sequential(ReLU)") + cs2 = ConfigString("Sequential(ReLU)") + + # Same config strings should have same hash + assert hash(cs1) == hash(cs2) + + # Should be usable in sets + config_set = {cs1, cs2} + assert len(config_set) == 1 + + +class TestOperationSerialization: + """Test serialization of Operation objects with callables.""" + + def test_operation_with_args_and_kwargs(self): + """Test converting an Operation with args and kwargs to string.""" + from neps.space.neps_spaces.neps_space import convert_operation_to_string + from neps.space.neps_spaces.parameters import Operation + + # Create an operation with args and kwargs (like Conv2D) + operation = Operation( + "Conv2D", + args=(64,), + kwargs={"kernel_size": 3, "stride": 1}, + ) + + result = convert_operation_to_string(operation) + # Should serialize to Conv2D(64) + assert "Conv2D" in result + assert "64" in result + + def test_nested_operations_with_multiple_args(self): + """Test converting nested operations with multiple args.""" + from neps.space.neps_spaces.neps_space import 
convert_operation_to_string + from neps.space.neps_spaces.parameters import Operation + + # Create nested operations like Sequential(ReLU, Conv2D(64)) + conv_op = Operation("Conv2D", args=(64,), kwargs={"kernel_size": 3}) + relu_op = Operation("ReLU", args=(), kwargs={}) + sequential_op = Operation("Sequential", args=(relu_op, conv_op), kwargs={}) + + result = convert_operation_to_string(sequential_op) + # Should contain all operators + assert "Sequential" in result + assert "ReLU" in result + assert "Conv2D" in result + assert "64" in result + + def test_operation_with_callable_operator(self): + """Test converting an Operation with a callable operator.""" + from neps.space.neps_spaces.neps_space import convert_operation_to_string + from neps.space.neps_spaces.parameters import Operation + + # Define a simple callable + def my_layer(in_features, out_features): + return f"MyLayer({in_features}, {out_features})" + + # Create operation with callable + operation = Operation(my_layer, args=(128, 64), kwargs={}) + + result = convert_operation_to_string(operation) + # Should use the callable's name + assert "my_layer" in result + + def test_operation_serialization_with_mixed_args(self): + """Test operation with mix of simple args and operations.""" + from neps.space.neps_spaces.neps_space import convert_operation_to_string + from neps.space.neps_spaces.parameters import Operation + + # Create nested operation with mixed types + inner = Operation("ReLU", args=(), kwargs={}) + outer = Operation("Sequential", args=(inner, "BatchNorm"), kwargs={}) + + result = convert_operation_to_string(outer) + # Should serialize both operations and simple strings + assert "Sequential" in result + assert "ReLU" in result + assert "BatchNorm" in result + + def test_round_trip_with_operations(self): + """Test that operations can round-trip through unwrap/wrap.""" + from neps.space.neps_spaces.neps_space import convert_operation_to_string + from neps.space.neps_spaces.parameters import 
Operation + + # Create a complex nested structure + conv1 = Operation("Conv2D", args=(32,), kwargs={"kernel_size": 3}) + relu = Operation("ReLU", args=(), kwargs={}) + conv2 = Operation("Conv2D", args=(64,), kwargs={"kernel_size": 3}) + sequential = Operation("Sequential", args=(conv1, relu, conv2), kwargs={}) + + # Convert to string + config_str = convert_operation_to_string(sequential) + + # Verify it's parseable (unwrap should work) + unwrapped = unwrap_config_string(config_str) + assert len(unwrapped) > 0 + + # Verify it can be wrapped back + rewrapped = wrap_config_into_string(tuple(unwrapped)) + assert rewrapped == config_str + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index df92b6eec..bd692566f 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -291,12 +291,12 @@ def test_resolve_context(): ), } expected_s_config_string = ( - "(Sequential (Sequential (Sequential (ReLUConvBN)) (Sequential (Conv2D-3)" - " (Sequential (Sequential (Sequential (Sequential (Identity) (Conv2D-3)" - " (Identity)))) (Sequential (ReLUConvBN)) (Conv2D-3) (Identity) (Conv2D-1)" - " (Conv2D-3) (Conv2D-1) (Identity)) (ReLUConvBN))) (Sequential (Sequential" - " (Sequential (Sequential (Identity) (Sequential (ReLUConvBN)))))) (Conv2D-1)" - " (Conv2D-1) (Identity) (Identity) (Conv2D-1) (Conv2D-1))" + "Sequential(Sequential(Sequential(ReLUConvBN), Sequential(Conv2D-3," + " Sequential(Sequential(Sequential(Sequential(Identity, Conv2D-3," + " Identity))), Sequential(ReLUConvBN), Conv2D-3, Identity, Conv2D-1," + " Conv2D-3, Conv2D-1, Identity), ReLUConvBN)), Sequential(Sequential" + "(Sequential(Sequential(Identity, Sequential(ReLUConvBN))))), Conv2D-1," + " Conv2D-1, Identity, Identity, Conv2D-1, Conv2D-1)" ) pipeline = GrammarLike() @@ -482,17 +482,17 @@ def 
test_resolve_context_alt(): ), } expected_s_config_string = ( - "(Sequential (Sequential (Sequential (Sequential (Sequential " - "(Sequential (Conv2D-3) (Sequential (ReLUConvBN)))) (Sequential " - "(ReLUConvBN)) (Identity) (Conv2D-3) (Conv2D-3) (Conv2D-1) (Conv2D-3) " - "(Identity)))) (Sequential (Conv2D-3) (Sequential (Sequential " - "(Sequential (Sequential (Sequential (ReLUConvBN)) (Sequential " - "(Conv2D-1))) (Sequential (Sequential (ReLUConvBN))) (Conv2D-1) " - "(Conv2D-1) (Identity) (Conv2D-1) (Conv2D-3) (Conv2D-3))) " - "(Sequential (Sequential (ReLUConvBN)) (Sequential (Sequential " - "(Sequential (Identity))) (Sequential (Conv2D-3))) (Conv2D-1) " - "(Identity) (Conv2D-1) (Conv2D-1) (Conv2D-1) (Identity)) (Identity) " - "(Identity) (Identity) (Conv2D-1) (Conv2D-1) (Conv2D-3)) (ReLUConvBN)))" + "Sequential(Sequential(Sequential(Sequential(Sequential" + "(Sequential(Conv2D-3, Sequential(ReLUConvBN))), Sequential" + "(ReLUConvBN), Identity, Conv2D-3, Conv2D-3, Conv2D-1, Conv2D-3," + " Identity))), Sequential(Conv2D-3, Sequential(Sequential" + "(Sequential(Sequential(Sequential(ReLUConvBN), Sequential" + "(Conv2D-1)), Sequential(Sequential(ReLUConvBN)), Conv2D-1," + " Conv2D-1, Identity, Conv2D-1, Conv2D-3, Conv2D-3))," + " Sequential(Sequential(ReLUConvBN), Sequential(Sequential" + "(Sequential(Identity)), Sequential(Conv2D-3)), Conv2D-1," + " Identity, Conv2D-1, Conv2D-1, Conv2D-1, Identity), Identity," + " Identity, Identity, Conv2D-1, Conv2D-1, Conv2D-3), ReLUConvBN))" ) pipeline = GrammarLikeAlt() From 67c0a8388316340225f0373c3dea8f4a81342360 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 28 Oct 2025 23:43:55 +0100 Subject: [PATCH 093/156] fix: enhance configuration string formatting for hyperparameters and improve test assertions --- neps/space/neps_spaces/config_string.py | 20 ++- .../test_search_space__hnas_like.py | 148 +++++++++--------- .../test_search_space__reuse_arch_elements.py | 78 ++++----- 3 files changed, 135 insertions(+), 111 
deletions(-) diff --git a/neps/space/neps_spaces/config_string.py b/neps/space/neps_spaces/config_string.py index 9b12507ee..a31e05c7a 100644 --- a/neps/space/neps_spaces/config_string.py +++ b/neps/space/neps_spaces/config_string.py @@ -166,6 +166,13 @@ def reconstruct(item: UnwrappedConfigStringPart) -> str: # noqa: C901, PLR0912 item.operator.__name__ if callable(item.operator) else item.operator ) + # Check if we have hyperparameters (not empty and not just "{}") + has_hyperparameters = ( + item.hyperparameters + and item.hyperparameters.strip() + and item.hyperparameters != "{}" + ) + # Get children of this item item_key = (item.level, item.opening_index) children = children_by_parent.get(item_key, []) @@ -173,9 +180,14 @@ def reconstruct(item: UnwrappedConfigStringPart) -> str: # noqa: C901, PLR0912 # Reconstruct operands if not item.operands and not children: # No operands at all - just return the operator name + # (or operator with hyperparameters if they exist) + if has_hyperparameters: + return f"{operator_name}({item.hyperparameters})" return operator_name if not children: - # Leaf node - just wrap the operands + # Leaf node - wrap the operands (and hyperparameters if present) + if has_hyperparameters: + return f"{operator_name}({item.hyperparameters}, {item.operands})" return f"{operator_name}({item.operands})" # Has children - need to mix nested and non-nested operands # Parse operands to separate nested from non-nested @@ -217,6 +229,12 @@ def reconstruct(item: UnwrappedConfigStringPart) -> str: # noqa: C901, PLR0912 reconstructed_parts.append(reconstruct(children[child_idx])) child_idx += 1 + # Build result with hyperparameters if present + if has_hyperparameters: + return ( + f"{operator_name}({item.hyperparameters}," + f" {', '.join(reconstructed_parts)})" + ) return f"{operator_name}({', '.join(reconstructed_parts)})" # Start reconstruction from the root (level 1 items) diff --git a/tests/test_neps_space/test_search_space__hnas_like.py 
b/tests/test_neps_space/test_search_space__hnas_like.py index 9b323268b..4bf6f47d7 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -250,14 +250,18 @@ def test_hnas_like_context(): ), "Resolvable.CL.args.sequence[5].resampled_categorical::categorical__4": 0, "Resolvable.ARCH::categorical__3": 1, - "Resolvable.ARCH.sampled_value.args.sequence[0].resampled_categorical::categorical__3": 2, + "Resolvable.ARCH.sampled_value.args.sequence[0].resampled_categorical::categorical__3": ( + 2 + ), "Resolvable.ARCH.sampled_value.args.sequence[0].resampled_categorical.sampled_value.args.sequence[0].resampled_categorical::categorical__3": ( 2 ), "Resolvable.ARCH.sampled_value.args.sequence[0].resampled_categorical.sampled_value.args.sequence[1].resampled_categorical::categorical__3": ( 0 ), - "Resolvable.ARCH.sampled_value.args.sequence[1].resampled_categorical::categorical__3": 2, + "Resolvable.ARCH.sampled_value.args.sequence[1].resampled_categorical::categorical__3": ( + 2 + ), "Resolvable.ARCH.sampled_value.args.sequence[1].resampled_categorical.sampled_value.args.sequence[0].resampled_categorical::categorical__3": ( 0 ), @@ -270,80 +274,82 @@ def test_hnas_like_context(): "Resolvable.ARCH.sampled_value.args.sequence[1].resampled_categorical.sampled_value.args.sequence[3].resampled_categorical::categorical__3": ( 1 ), - "Resolvable.ARCH.sampled_value.args.sequence[2].resampled_categorical::categorical__3": 2, + "Resolvable.ARCH.sampled_value.args.sequence[2].resampled_categorical::categorical__3": ( + 2 + ), } expected_cl_config_string = ( - "(CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3)" - " (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" - " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))" + "CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3," + " NORM batch)), OPS zero, OPS id, OPS avg_pool, 
OPS Sequential1(CONVBLOCK" + " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero)" ) expected_arch_config_string = ( - "(D2 Sequential3 (D0 Residual3 (C Residual2 (CELL Cell (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" - " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" - " (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" - " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" - " (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" - " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" - " (CONV dconv3x3) (NORM layer))) (OPS zero))) (C Sequential2 (CELL Cell (OPS" - " Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch)))" - " (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT" - " relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" - " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" - " (CONV dconv3x3) (NORM layer))) (OPS zero))) (CELL Cell (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" - " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" - " (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" - " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" - " (CONV dconv3x3) (NORM layer))) (OPS zero))) (D1 Residual3 (C Sequential2 (CELL" - " Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM" - " batch))) (OPS zero) (OPS 
id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" - " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell" - " (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM" - " batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" - " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (C" - " Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV" - " dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))" - " (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3)" - " (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" - " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))) (DOWN" - " Sequential2 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV" - " dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero))" - " resBlock) (DOWN Sequential3 (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3" - " (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool)" - " (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM" - " layer))) (OPS zero)) (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT" - " relu) (CONV dconv3x3) (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS" - " Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer)))" - " (OPS zero)) resBlock)) (D1 Residual3 (C Sequential2 (CELL Cell (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" - " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" - " (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" - " 
(OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" - " (CONV dconv3x3) (NORM layer))) (OPS zero))) (C Sequential2 (CELL Cell (OPS" - " Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch)))" - " (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT" - " relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell (OPS Sequential1" - " (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch))) (OPS zero)" - " (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu)" - " (CONV dconv3x3) (NORM layer))) (OPS zero))) (DOWN Sequential2 (CELL Cell (OPS" - " Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM batch)))" - " (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK Sequential3 (ACT" - " relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock) (DOWN Sequential3" - " (CELL Cell (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3)" - " (NORM batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" - " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) (CELL Cell" - " (OPS Sequential1 (CONVBLOCK Sequential3 (ACT relu) (CONV dconv3x3) (NORM" - " batch))) (OPS zero) (OPS id) (OPS avg_pool) (OPS Sequential1 (CONVBLOCK" - " Sequential3 (ACT relu) (CONV dconv3x3) (NORM layer))) (OPS zero)) resBlock)))" + "D2 Sequential3(D0 Residual3(C Residual2(CELL Cell(OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," + " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," + " CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," + " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," + " CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," + " OPS id, OPS avg_pool, 
OPS Sequential1(CONVBLOCK Sequential3(ACT relu," + " CONV dconv3x3, NORM layer)), OPS zero)), C Sequential2(CELL Cell(OPS" + " Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch))," + " OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT" + " relu, CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," + " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," + " CONV dconv3x3, NORM layer)), OPS zero)), CELL Cell(OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," + " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," + " CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," + " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," + " CONV dconv3x3, NORM layer)), OPS zero)), D1 Residual3(C Sequential2(CELL" + " Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM" + " batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK" + " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero), CELL Cell" + "(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM" + " batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK" + " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero)), C" + " Sequential2(CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV" + " dconv3x3, NORM batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero)," + " CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3," + " NORM batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK" + " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero)), DOWN" + " Sequential2(CELL Cell(OPS Sequential1(CONVBLOCK 
Sequential3(ACT relu, CONV" + " dconv3x3, NORM batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero)," + " resBlock), DOWN Sequential3(CELL Cell(OPS Sequential1(CONVBLOCK Sequential3" + "(ACT relu, CONV dconv3x3, NORM batch)), OPS zero, OPS id, OPS avg_pool," + " OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM" + " layer)), OPS zero), CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT" + " relu, CONV dconv3x3, NORM batch)), OPS zero, OPS id, OPS avg_pool, OPS" + " Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM layer))," + " OPS zero), resBlock)), D1 Residual3(C Sequential2(CELL Cell(OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," + " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," + " CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," + " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," + " CONV dconv3x3, NORM layer)), OPS zero)), C Sequential2(CELL Cell(OPS" + " Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch))," + " OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT" + " relu, CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" + "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," + " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," + " CONV dconv3x3, NORM layer)), OPS zero)), DOWN Sequential2(CELL Cell(OPS" + " Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch))," + " OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT" + " relu, CONV dconv3x3, NORM layer)), OPS zero), resBlock), DOWN Sequential3" + "(CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3," + " NORM batch)), OPS zero, OPS id, OPS avg_pool, 
OPS Sequential1(CONVBLOCK" + " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero), CELL Cell" + "(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM" + " batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK" + " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero), resBlock)))" ) pipeline = HNASLikePipeline() diff --git a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index c5554236a..942cc2185 100644 --- a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -155,9 +155,9 @@ def test_nested_simple(): @pytest.mark.repeat(50) def test_nested_simple_string(): possible_cell_config_strings = { - "(relu)", - "(prelu_with_args (0.1) (0.2))", - "(prelu_with_kwargs {'init': 0.1})", + "relu", + "prelu_with_args(0.1, 0.2)", + "prelu_with_kwargs({'init': 0.1})", } pipeline = ActPipelineSimple() @@ -207,8 +207,8 @@ def test_nested_complex_string(): act_config_string = neps_space.convert_operation_to_string(act) assert act_config_string - # expected to look like: "(prelu {'init': 0.1087727907176638})" - expected_prefix = "(prelu {'init': " + # expected to look like: "prelu({'init': 0.1087727907176638})" + expected_prefix = "prelu({'init': " expected_ending = "})" assert act_config_string.startswith(expected_prefix) assert act_config_string.endswith(expected_ending) @@ -243,7 +243,7 @@ def test_fixed_pipeline_string(): act = resolved_pipeline.act act_config_string = neps_space.convert_operation_to_string(act) assert act_config_string - assert act_config_string == "(prelu {'init': 0.5})" + assert act_config_string == "prelu({'init': 0.5})" @pytest.mark.repeat(50) @@ -279,14 +279,14 @@ def test_simple_reuse(): @pytest.mark.repeat(50) def test_simple_reuse_string(): possible_conv_block_config_strings = { - "(sequential3 (conv1x1) (conv1x1) (conv1x1))", - "(sequential3 
(conv1x1) (conv3x3) (conv1x1))", - "(sequential3 (conv3x3) (conv1x1) (conv3x3))", - "(sequential3 (conv3x3) (conv3x3) (conv3x3))", - "(sequential3 (conv5x5) (conv5x5) (conv5x5))", - "(sequential3 (conv5x5) (conv9x9) (conv5x5))", - "(sequential3 (conv9x9) (conv5x5) (conv9x9))", - "(sequential3 (conv9x9) (conv9x9) (conv9x9))", + "sequential3(conv1x1, conv1x1, conv1x1)", + "sequential3(conv1x1, conv3x3, conv1x1)", + "sequential3(conv3x3, conv1x1, conv3x3)", + "sequential3(conv3x3, conv3x3, conv3x3)", + "sequential3(conv5x5, conv5x5, conv5x5)", + "sequential3(conv5x5, conv9x9, conv5x5)", + "sequential3(conv9x9, conv5x5, conv9x9)", + "sequential3(conv9x9, conv9x9, conv9x9)", } pipeline = ConvPipeline() @@ -350,43 +350,43 @@ def test_shared_complex(): def test_shared_complex_string(): possible_cell_config_strings = { ( - "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (avg_pool) (avg_pool)" - " (avg_pool) (avg_pool) (avg_pool))" + "cell({'float_hp': 0.5, 'int_hp': 2}, avg_pool, avg_pool, avg_pool," + " avg_pool, avg_pool, avg_pool)" ), ( - "(cell {'float_hp': 0.5, 'int_hp': 2} (zero) (sequential3 (relu) (conv3x3)" - " (batch)) (zero) (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3" - " (relu) (conv3x3) (batch)))" + "cell({'float_hp': 0.5, 'int_hp': 2}, zero, sequential3(relu, conv3x3," + " batch), zero, sequential3(relu, conv3x3, batch), zero, sequential3" + "(relu, conv3x3, batch))" ), ( - "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch))" - " (avg_pool) (sequential3 (relu) (conv3x3) (batch)) (avg_pool) (sequential3" - " (relu) (conv3x3) (batch)) (avg_pool))" + "cell({'float_hp': 0.5, 'int_hp': 2}, sequential3(relu, conv3x3, batch)," + " avg_pool, sequential3(relu, conv3x3, batch), avg_pool, sequential3" + "(relu, conv3x3, batch), avg_pool)" ), - "(cell {'float_hp': 0.5, 'int_hp': 2} (zero) (zero) (zero) (zero) (zero) (zero))", + "cell({'float_hp': 0.5, 'int_hp': 2}, zero, zero, zero, zero, zero, zero)", ( - "(cell {'float_hp': 
0.5, 'int_hp': 2} (zero) (avg_pool) (zero) (avg_pool)" - " (zero) (avg_pool))" + "cell({'float_hp': 0.5, 'int_hp': 2}, zero, avg_pool, zero, avg_pool," + " zero, avg_pool)" ), ( - "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch))" - " (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu) (conv3x3)" - " (batch)) (sequential3 (relu) (conv3x3) (batch)) (sequential3 (relu)" - " (conv3x3) (batch)) (sequential3 (relu) (conv3x3) (batch)))" + "cell({'float_hp': 0.5, 'int_hp': 2}, sequential3(relu, conv3x3, batch)," + " sequential3(relu, conv3x3, batch), sequential3(relu, conv3x3," + " batch), sequential3(relu, conv3x3, batch), sequential3(relu," + " conv3x3, batch), sequential3(relu, conv3x3, batch))" ), ( - "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (zero) (avg_pool) (zero)" - " (avg_pool) (zero))" + "cell({'float_hp': 0.5, 'int_hp': 2}, avg_pool, zero, avg_pool, zero," + " avg_pool, zero)" ), ( - "(cell {'float_hp': 0.5, 'int_hp': 2} (sequential3 (relu) (conv3x3) (batch))" - " (zero) (sequential3 (relu) (conv3x3) (batch)) (zero) (sequential3 (relu)" - " (conv3x3) (batch)) (zero))" + "cell({'float_hp': 0.5, 'int_hp': 2}, sequential3(relu, conv3x3, batch)," + " zero, sequential3(relu, conv3x3, batch), zero, sequential3(relu," + " conv3x3, batch), zero)" ), ( - "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (sequential3 (relu)" - " (conv3x3) (batch)) (avg_pool) (sequential3 (relu) (conv3x3) (batch))" - " (avg_pool) (sequential3 (relu) (conv3x3) (batch)))" + "cell({'float_hp': 0.5, 'int_hp': 2}, avg_pool, sequential3(relu," + " conv3x3, batch), avg_pool, sequential3(relu, conv3x3, batch)," + " avg_pool, sequential3(relu, conv3x3, batch))" ), } @@ -447,8 +447,8 @@ def test_shared_complex_context(): assert resolved_pipeline_second is not resolved_pipeline_first expected_config_string: str = ( - "(cell {'float_hp': 0.5, 'int_hp': 2} (avg_pool) (zero) (avg_pool) (zero)" - " (avg_pool) (zero))" + "cell({'float_hp': 0.5, 'int_hp': 2}, 
avg_pool, zero, avg_pool, zero," + " avg_pool, zero)" ) # however, their final results should be the same thing From 392303b6a7891b7a75fa8ee3c1926bfb84002f71 Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 29 Oct 2025 00:01:33 +0100 Subject: [PATCH 094/156] fix: update root directory paths in tests for consistency and clarity --- neps_examples/__init__.py | 1 + .../expert_priors_for_hyperparameters.py | 36 ++++++++----------- .../test_neps_space/test_neps_integration.py | 8 ++--- ...st_neps_integration_priorband__max_cost.py | 4 +-- ...t_neps_integration_priorband__max_evals.py | 4 +-- 5 files changed, 24 insertions(+), 29 deletions(-) diff --git a/neps_examples/__init__.py b/neps_examples/__init__.py index bd41652af..2bf4ccdd9 100644 --- a/neps_examples/__init__.py +++ b/neps_examples/__init__.py @@ -4,6 +4,7 @@ "architecture", "architecture_and_hyperparameters", "hyperparameters", + "pytorch_nn_example", ], "convenience": [ "logging_additional_info", diff --git a/neps_examples/efficiency/expert_priors_for_hyperparameters.py b/neps_examples/efficiency/expert_priors_for_hyperparameters.py index b84a66c05..f966d0731 100644 --- a/neps_examples/efficiency/expert_priors_for_hyperparameters.py +++ b/neps_examples/efficiency/expert_priors_for_hyperparameters.py @@ -23,29 +23,23 @@ def evaluate_pipeline(some_float, some_integer, some_cat): # neps uses the default values and a confidence in this default value to construct a prior # that speeds up the search class HPOSpace(neps.PipelineSpace): - some_float = ( - neps.Float( - min_value=1, - max_value=1000, - log=True, - prior=900, - prior_confidence="medium", - ), + some_float = neps.Float( + min_value=1, + max_value=1000, + log=True, + prior=900, + prior_confidence="medium", ) - some_integer = ( - neps.Integer( - min_value=0, - max_value=50, - prior=35, - prior_confidence="low", - ), + some_integer = neps.Integer( + min_value=0, + max_value=50, + prior=35, + prior_confidence="low", ) - some_cat = ( - neps.Categorical( - 
choices=("a", "b", "c"), - prior=0, - prior_confidence="high", - ), + some_cat = neps.Categorical( + choices=("a", "b", "c"), + prior=0, + prior_confidence="high", ) diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index adc4cb55a..1fb4da66b 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -167,7 +167,7 @@ class DemoHyperparameterComplexSpace(PipelineSpace): ) def test_hyperparameter_demo(optimizer): pipeline_space = DemoHyperparameterSpace() - root_directory = f"/tests_tmpdir/test_neps_spaces/results/hyperparameter_demo__{optimizer.func.__name__}" + root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_demo__{optimizer.func.__name__}" neps.run( evaluate_pipeline=hyperparameter_pipeline_to_optimize, @@ -189,7 +189,7 @@ def test_hyperparameter_demo(optimizer): ) def test_hyperparameter_with_fidelity_demo(optimizer): pipeline_space = DemoHyperparameterWithFidelitySpace() - root_directory = f"/tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity_demo__{optimizer.func.__name__}" + root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity_demo__{optimizer.func.__name__}" neps.run( evaluate_pipeline=hyperparameter_pipeline_to_optimize, @@ -211,7 +211,7 @@ def test_hyperparameter_with_fidelity_demo(optimizer): ) def test_hyperparameter_complex_demo(optimizer): pipeline_space = DemoHyperparameterComplexSpace() - root_directory = f"/tests_tmpdir/test_neps_spaces/results/hyperparameter_complex_demo__{optimizer.func.__name__}" + root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_complex_demo__{optimizer.func.__name__}" neps.run( evaluate_pipeline=hyperparameter_pipeline_to_optimize, @@ -363,7 +363,7 @@ def test_operation_demo(optimizer): def test_neps_hyperband_with_fidelity_demo(optimizer): """Test neps_hyperband with a fidelity space.""" pipeline_space = 
DemoHyperparameterWithFidelitySpace() - root_directory = f"/tests_tmpdir/test_neps_spaces/results/neps_hyperband_fidelity_demo__{optimizer.__name__}" + root_directory = f"tests_tmpdir/test_neps_spaces/results/neps_hyperband_fidelity_demo__{optimizer.__name__}" neps.run( evaluate_pipeline=hyperparameter_pipeline_to_optimize, diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index eda19c37f..bb9751266 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -93,7 +93,7 @@ class DemoHyperparameterWithFidelitySpace(PipelineSpace): def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): optimizer.__name__ = optimizer_name # Needed by NEPS later. pipeline_space = DemoHyperparameterWithFidelitySpace() - root_directory = f"/tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" + root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" # Reset the _COSTS global, so they do not get mixed up between tests. _COSTS.clear() @@ -133,7 +133,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): optimizer.__name__ = optimizer_name # Needed by NEPS later. pipeline_space = DemoHyperparameterWithFidelitySpace() - root_directory = f"/tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" + root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" # Reset the _COSTS global, so they do not get mixed up between tests. 
_COSTS.clear() diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index b95463cda..b9320d038 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -80,7 +80,7 @@ class DemoHyperparameterWithFidelitySpace(PipelineSpace): def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): optimizer.__name__ = optimizer_name # Needed by NEPS later. pipeline_space = DemoHyperparameterWithFidelitySpace() - root_directory = f"/tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" + root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" neps.run( evaluate_pipeline=evaluate_pipeline, @@ -117,7 +117,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): optimizer.__name__ = optimizer_name # Needed by NEPS later. 
pipeline_space = DemoHyperparameterWithFidelitySpace() - root_directory = f"/tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" + root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" neps.run( evaluate_pipeline=evaluate_pipeline, From 1016925ce9ab0c70c7ad1a10133a0b9703460e2a Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 29 Oct 2025 00:11:00 +0100 Subject: [PATCH 095/156] fix: improve path formatting in tests and enhance readability of assertions --- tests/test_neps_space/test_neps_integration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index 1fb4da66b..654dec671 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -336,7 +336,7 @@ class DemoOperationSpace(PipelineSpace): def test_operation_demo(optimizer): pipeline_space = DemoOperationSpace() root_directory = ( - f"/tests_tmpdir/test_neps_spaces/results/operation_demo__{optimizer.__name__}" + f"tests_tmpdir/test_neps_spaces/results/operation_demo__{optimizer.__name__}" ) neps.run( From e2c90cae883690e93160bc869e1575e1a1fc86f7 Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 29 Oct 2025 11:36:02 +0100 Subject: [PATCH 096/156] fix: normalize weights in PriorBandSampler to handle floating-point precision issues --- neps/optimizers/priorband.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/neps/optimizers/priorband.py b/neps/optimizers/priorband.py index 6d01581e9..513fdfb9a 100644 --- a/neps/optimizers/priorband.py +++ b/neps/optimizers/priorband.py @@ -47,7 +47,7 @@ class PriorBandSampler: fid_bounds: tuple[int, int] | tuple[float, float] """The fidelity bounds.""" - def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: + def sample_config(self, table: pd.DataFrame, rung: 
int) -> dict[str, Any]: # noqa: PLR0915 """Samples a configuration using the PriorBand algorithm. Args: @@ -145,6 +145,14 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: # 4. And finally, we distribute the original w_prior according to this ratio w_inc = w_prior * inc_ratio w_prior = w_prior * prior_ratio + + # Normalize to ensure the weights sum to exactly 1.0 + # This handles floating-point precision issues + total_weight = w_prior + w_inc + w_random + w_prior = w_prior / total_weight + w_inc = w_inc / total_weight + w_random = w_random / total_weight + assert np.isclose(w_prior + w_inc + w_random, 1.0) # Now we use these weights to choose which sampling distribution to sample from From a9b6603221f32175ed62f034bf7f205e245945fc Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 29 Oct 2025 12:33:23 +0100 Subject: [PATCH 097/156] fix: update optimizer names in tests for consistency and clarity --- ...st_neps_integration_priorband__max_cost.py | 30 +++++++++--------- ...t_neps_integration_priorband__max_evals.py | 31 ++++++++++--------- 2 files changed, 33 insertions(+), 28 deletions(-) diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index bb9751266..ddafe81a1 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -66,32 +66,34 @@ class DemoHyperparameterWithFidelitySpace(PipelineSpace): [ ( partial(algorithms.neps_random_search, ignore_fidelity=True), - "new__RandomSearch", + "neps_random_search", ), ( partial(algorithms.complex_random_search, ignore_fidelity=True), - "new__ComplexRandomSearch", + "neps_complex_random_search", ), ( partial(algorithms.neps_priorband, base="successive_halving"), - "new__priorband+successive_halving", + "neps_priorband+successive_halving", ), ( partial(algorithms.neps_priorband, base="asha"), - 
"new__priorband+asha", + "neps_priorband+asha", ), ( partial(algorithms.neps_priorband, base="async_hb"), - "new__priorband+async_hb", + "neps_priorband+async_hb", ), ( algorithms.neps_priorband, - "new__priorband+hyperband", + "neps_priorband+hyperband", ), ], ) def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): - optimizer.__name__ = optimizer_name # Needed by NEPS later. + optimizer.__name__ = ( + "neps_priorband" if "priorband" in optimizer_name else optimizer_name + ) # Needed by NEPS later. pipeline_space = DemoHyperparameterWithFidelitySpace() root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" @@ -103,7 +105,7 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, - cost_to_spend=100, # Reduced from 1000 to make tests faster + cost_to_spend=100, overwrite_root_directory=True, ) neps.status(root_directory, print_summary=True) @@ -114,24 +116,24 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): [ ( partial(algorithms.priorband, base="successive_halving"), - "old__priorband+successive_halving", + "old_priorband+successive_halving", ), ( partial(algorithms.priorband, base="asha"), - "old__priorband+asha", + "old_priorband+asha", ), ( partial(algorithms.priorband, base="async_hb"), - "old__priorband+async_hb", + "old_priorband+async_hb", ), ( algorithms.priorband, - "old__priorband+hyperband", + "old_priorband+hyperband", ), ], ) def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): - optimizer.__name__ = optimizer_name # Needed by NEPS later. + optimizer.__name__ = "priorband" # Needed by NEPS later. 
pipeline_space = DemoHyperparameterWithFidelitySpace() root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__costs__{optimizer.__name__}" @@ -143,7 +145,7 @@ def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, - cost_to_spend=100, # Reduced from 1000 to make tests faster + cost_to_spend=100, overwrite_root_directory=True, ) neps.status(root_directory, print_summary=True) diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index b9320d038..cf21ac6b5 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -53,32 +53,34 @@ class DemoHyperparameterWithFidelitySpace(PipelineSpace): [ ( partial(algorithms.neps_random_search, ignore_fidelity=True), - "new__RandomSearch", + "neps_random_search", ), ( partial(algorithms.complex_random_search, ignore_fidelity=True), - "new__ComplexRandomSearch", + "neps_complex_random_search", ), ( partial(algorithms.neps_priorband, base="successive_halving"), - "new__priorband+successive_halving", + "neps_priorband+successive_halving", ), ( partial(algorithms.neps_priorband, base="asha"), - "new__priorband+asha", + "neps_priorband+asha", ), ( partial(algorithms.neps_priorband, base="async_hb"), - "new__priorband+async_hb", + "neps_priorband+async_hb", ), ( algorithms.neps_priorband, - "new__priorband+hyperband", + "neps_priorband+hyperband", ), ], ) def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): - optimizer.__name__ = optimizer_name # Needed by NEPS later. + optimizer.__name__ = ( + "neps_priorband" if "priorband" in optimizer_name else optimizer_name + ) # Needed by NEPS later. 
pipeline_space = DemoHyperparameterWithFidelitySpace() root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" @@ -87,7 +89,8 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, - evaluations_to_spend=100, + fidelities_to_spend=50 if "priorband" in optimizer.__name__ else None, + evaluations_to_spend=50 if "priorband" not in optimizer.__name__ else None, overwrite_root_directory=True, ) neps.status(root_directory, print_summary=True) @@ -98,24 +101,24 @@ def test_hyperparameter_with_fidelity_demo_new(optimizer, optimizer_name): [ ( partial(algorithms.priorband, base="successive_halving"), - "old__priorband+successive_halving", + "old_priorband+successive_halving", ), ( partial(algorithms.priorband, base="asha"), - "old__priorband+asha", + "old_priorband+asha", ), ( partial(algorithms.priorband, base="async_hb"), - "old__priorband+async_hb", + "old_priorband+async_hb", ), ( algorithms.priorband, - "old__priorband+hyperband", + "old_priorband+hyperband", ), ], ) def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): - optimizer.__name__ = optimizer_name # Needed by NEPS later. + optimizer.__name__ = "priorband" # Needed by NEPS later. 
pipeline_space = DemoHyperparameterWithFidelitySpace() root_directory = f"tests_tmpdir/test_neps_spaces/results/hyperparameter_with_fidelity__evals__{optimizer.__name__}" @@ -124,7 +127,7 @@ def test_hyperparameter_with_fidelity_demo_old(optimizer, optimizer_name): pipeline_space=pipeline_space, optimizer=optimizer, root_directory=root_directory, - evaluations_to_spend=100, + fidelities_to_spend=50, overwrite_root_directory=True, ) neps.status(root_directory, print_summary=True) From b354f9f9e47e720a08fd46d46df039a00bec3d8e Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 29 Oct 2025 12:41:28 +0100 Subject: [PATCH 098/156] fix: normalize weights in PriorBandSampler to handle floating-point precision issues and increase np.isclose tolerance --- neps/optimizers/neps_priorband.py | 11 ++++++++++- neps/optimizers/priorband.py | 3 ++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/neps/optimizers/neps_priorband.py b/neps/optimizers/neps_priorband.py index 16ea26e17..bd90a49ee 100644 --- a/neps/optimizers/neps_priorband.py +++ b/neps/optimizers/neps_priorband.py @@ -130,7 +130,16 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: # 4. 
And finally, we distribute the original w_prior according to this ratio w_inc = w_prior * inc_ratio w_prior = w_prior * prior_ratio - assert np.isclose(w_prior + w_inc + w_random, 1.0) + + # Normalize to ensure the weights sum to exactly 1.0 + # This handles floating-point precision issues + total_weight = w_prior + w_inc + w_random + w_prior = w_prior / total_weight + w_inc = w_inc / total_weight + w_random = w_random / total_weight + + # Verify weights are valid probabilities (relaxed tolerance for floating-point) + assert np.isclose(w_prior + w_inc + w_random, 1.0, rtol=1e-7, atol=1e-9) # Now we use these weights to choose which sampling distribution to sample from policy = np.random.choice( diff --git a/neps/optimizers/priorband.py b/neps/optimizers/priorband.py index 513fdfb9a..c12358ccf 100644 --- a/neps/optimizers/priorband.py +++ b/neps/optimizers/priorband.py @@ -153,7 +153,8 @@ def sample_config(self, table: pd.DataFrame, rung: int) -> dict[str, Any]: # no w_inc = w_inc / total_weight w_random = w_random / total_weight - assert np.isclose(w_prior + w_inc + w_random, 1.0) + # Verify weights are valid probabilities (relaxed tolerance for floating-point) + assert np.isclose(w_prior + w_inc + w_random, 1.0, rtol=1e-7, atol=1e-9) # Now we use these weights to choose which sampling distribution to sample from policy = np.random.choice( From bf78ebac1a5e6ef3f202403f914f59a80ce93302 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 31 Oct 2025 00:26:25 +0100 Subject: [PATCH 099/156] feat: add Regularized Evolution optimizer and integrate into NePS algorithms --- neps/optimizers/algorithms.py | 24 ++ neps/optimizers/neps_regularized_evolution.py | 362 ++++++++++++++++++ neps/space/neps_spaces/neps_space.py | 3 + 3 files changed, 389 insertions(+) create mode 100644 neps/optimizers/neps_regularized_evolution.py diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index b434aeb12..0bc50e649 100644 --- a/neps/optimizers/algorithms.py +++ 
b/neps/optimizers/algorithms.py @@ -40,6 +40,7 @@ NePSComplexRandomSearch, NePSRandomSearch, ) +from neps.optimizers.neps_regularized_evolution import NePSRegularizedEvolution from neps.optimizers.optimizer import AskFunction # noqa: TC001 from neps.optimizers.primo import PriMO from neps.optimizers.priorband import PriorBandSampler @@ -1807,6 +1808,29 @@ def neps_priorband( ) +def neps_regularized_evolution( + pipeline_space: PipelineSpace, + *, + population_size: int = 20, + tournament_size: int = 5, + use_priors: bool = True, + mutation_type: float | Literal["mutate_best", "crossover_top_2"] = 0.5, + n_mutations: int | Literal["random", "half"] | None = "random", + n_forgets: int | Literal["random", "half"] | None = None, + ignore_fidelity: bool | Literal["highest fidelity"] = False, +) -> NePSRegularizedEvolution: + return NePSRegularizedEvolution( + pipeline=pipeline_space, + population_size=population_size, + tournament_size=tournament_size, + use_priors=use_priors, + mutation_type=mutation_type, + n_mutations=n_mutations, + n_forgets=n_forgets, + ignore_fidelity=ignore_fidelity, + ) + + PredefinedOptimizers: Mapping[str, Any] = { f.__name__: f for f in ( diff --git a/neps/optimizers/neps_regularized_evolution.py b/neps/optimizers/neps_regularized_evolution.py new file mode 100644 index 000000000..42f926bb1 --- /dev/null +++ b/neps/optimizers/neps_regularized_evolution.py @@ -0,0 +1,362 @@ +"""This module implements a Regularized Evolution optimizer for NEPS.""" + +from __future__ import annotations + +import heapq +import random +from collections.abc import Mapping +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Literal + +from neps.space.neps_spaces.neps_space import ( + SamplingResolutionContext, + _prepare_sampled_configs, + resolve, +) +from neps.space.neps_spaces.parameters import Float, Integer, PipelineSpace +from neps.space.neps_spaces.sampling import ( + CrossoverByMixingSampler, + CrossoverNotPossibleError, + 
MutatateUsingCentersSampler, + MutateByForgettingSampler, + PriorOrFallbackSampler, + RandomSampler, +) + +if TYPE_CHECKING: + import neps.state.optimizer as optimizer_state + import neps.state.trial as trial_state + from neps.optimizers import optimizer + from neps.state.trial import Trial + + +@dataclass +class NePSRegularizedEvolution: + """A complex random search optimizer for a NePS pipeline. + It samples configurations randomly from the pipeline's domain and environment values, + and also performs mutations and crossovers based on previous successful trials. + + Args: + pipeline: The pipeline to optimize, which should be a Pipeline object. + + Raises: + ValueError: If the pipeline is not a Pipeline object. + """ + + def __init__( + self, + pipeline: PipelineSpace, + population_size: int = 20, + tournament_size: int = 5, + use_priors: bool = True, # noqa: FBT001, FBT002 + mutation_type: float | Literal["mutate_best", "crossover_top_2"] = 0.5, + n_mutations: int | Literal["random", "half"] | None = "random", + n_forgets: int | Literal["random", "half"] | None = None, + ignore_fidelity: bool | Literal["highest fidelity"] = False, # noqa: FBT002 + ): + """Initialize the RegularizedEvolution optimizer with a pipeline. + + Args: + pipeline: The pipeline to optimize, which should be a Pipeline object. + population_size: The size of the population for evolution. + tournament_size: The size of the tournament for selecting parents. + use_priors: Whether to use priors when sampling if available. + mutation_type: The type of mutation to use (e.g., "mutate_best", + "crossover_top_2"). If a float is provided, it is interpreted as the + probability of choosing mutation, compared to the probability of + crossover. + n_mutations: The number of mutations to apply. A fixed integer, "random" for + a random number between 1 and half the parameters, or "half" to mutate + half the parameters. + n_forgets: The number of parameters to forget. 
A fixed integer, "random" for + a random number between 1 and half the parameters, or "half" to forget + half the parameters. + ignore_fidelity: Whether to ignore fidelity when sampling. If set to "highest + fidelity", the highest fidelity values will be used. If True, fidelity + values will be sampled randomly. + + Raises: + ValueError: If the pipeline is not a Pipeline object. + """ + self._pipeline = pipeline + + self._environment_values = {} + fidelity_attrs = self._pipeline.fidelity_attrs + for fidelity_name, fidelity_obj in fidelity_attrs.items(): + # If the user specifically asked for the highest fidelity, use that. + if ignore_fidelity == "highest fidelity": + self._environment_values[fidelity_name] = fidelity_obj.max_value + # If the user asked to ignore fidelities, sample a value randomly from the + # domain. + elif ignore_fidelity is True: + # Sample randomly from the fidelity bounds. + if isinstance(fidelity_obj._domain, Integer): + assert isinstance(fidelity_obj.min_value, int) + assert isinstance(fidelity_obj.max_value, int) + self._environment_values[fidelity_name] = random.randint( + fidelity_obj.min_value, fidelity_obj.max_value + ) + elif isinstance(fidelity_obj._domain, Float): + self._environment_values[fidelity_name] = random.uniform( + fidelity_obj.min_value, fidelity_obj.max_value + ) + # By default we don't support fidelities unless explicitly requested. + else: + raise ValueError( + "ComplexRandomSearch does not support fidelities by default. Consider" + " using a different optimizer or setting `ignore_fidelity=True` or" + " `highest fidelity`." + ) + + self._random_or_prior_sampler: RandomSampler | PriorOrFallbackSampler = ( + RandomSampler( + predefined_samplings={}, + ) + ) + if use_priors: + self._random_or_prior_sampler = PriorOrFallbackSampler( + fallback_sampler=self._random_or_prior_sampler + ) + assert population_size >= tournament_size, ( + "Population size must be greater than or equal to tournament size." 
+ ) + self._tournament_size = tournament_size + self._population_size = population_size + self._mutation_type = mutation_type + self._n_mutations = n_mutations + self._n_forgets = n_forgets + + def _mutate_best( + self, top_trial_config: Mapping[str, Any] + ) -> tuple[PipelineSpace, SamplingResolutionContext]: + """Mutate the best trial's config by resampling or forgetting parameters. + + Args: + top_trial_config: The configuration of the best trial to mutate. + + Returns: + A mutated configuration (PipelineSpace and context tuple). + + Raises: + ValueError: If both n_mutations and n_forgets are None. + """ + if self._n_mutations: + n_mut = ( + self._n_mutations + if isinstance(self._n_mutations, int) + else ( + random.randint(1, len(top_trial_config) // 2) + if self._n_mutations == "random" + else len(top_trial_config) // 2 + ) + ) + return resolve( + pipeline=self._pipeline, + domain_sampler=MutatateUsingCentersSampler( + predefined_samplings=top_trial_config, + n_mutations=n_mut, + ), + environment_values=self._environment_values, + ) + if self._n_forgets: + n_forg = ( + self._n_forgets + if isinstance(self._n_forgets, int) + else ( + random.randint(1, len(top_trial_config) // 2) + if self._n_forgets == "random" + else max(1, len(top_trial_config) // 2) + ) + ) + return resolve( + pipeline=self._pipeline, + domain_sampler=MutateByForgettingSampler( + predefined_samplings=top_trial_config, + n_forgets=n_forg, + ), + environment_values=self._environment_values, + ) + raise ValueError("At least one of n_mutations or n_forgets must not be None.") + + def _crossover_top_2( + self, sorted_trials: list[Trial] + ) -> tuple[PipelineSpace, SamplingResolutionContext]: + """Perform crossover between top trials from the tournament. + + Args: + sorted_trials: List of configurations sorted by objective (best first). + + Returns: + A configuration created by crossover (PipelineSpace and context tuple), + or a mutated config if crossover fails. 
+ """ + # Create all possible crossovers between the top trials, sorted by smallest + # combined index. + all_crossovers = [ + (x, y) + for x in range(len(sorted_trials)) + for y in range(len(sorted_trials)) + if x < y + ] + all_crossovers.sort(key=lambda pair: pair[0] + pair[1]) + + for n, (config_1, config_2) in enumerate(all_crossovers): + top_trial_config = sorted_trials[config_1].config + second_best_trial_config = sorted_trials[config_2].config + + # Crossover between the best two trials' configs to create a new config. + try: + crossover_sampler = CrossoverByMixingSampler( + predefined_samplings_1=top_trial_config, + predefined_samplings_2=second_best_trial_config, + prefer_first_probability=0.5, + ) + except CrossoverNotPossibleError: + # A crossover was not possible for them. Increase configs and try again. + # If we have tried all crossovers, mutate the best instead. + if n == len(all_crossovers) - 1: + # Mutate 50% of the top trial's config. + return resolve( + pipeline=self._pipeline, + domain_sampler=MutatateUsingCentersSampler( + predefined_samplings=top_trial_config, + n_mutations=max(1, int(len(top_trial_config) / 2)), + ), + environment_values=self._environment_values, + ) + continue + else: + return resolve( + pipeline=self._pipeline, + domain_sampler=crossover_sampler, + environment_values=self._environment_values, + ) + + # Fallback in case all crossovers fail (shouldn't happen, but be safe) + return self._mutate_best(sorted_trials[0].config) + + def __call__( + self, + trials: Mapping[str, trial_state.Trial], + budget_info: optimizer_state.BudgetInfo | None, + n: int | None = None, + ) -> optimizer.SampledConfig | list[optimizer.SampledConfig]: + """Sample configurations randomly from the pipeline's domain and environment + values, and also perform mutations and crossovers based on previous successful + trials. + + Args: + trials: A mapping of trial IDs to Trial objects, representing previous + trials. 
+ budget_info: The budget information for the optimization process. + n: The number of configurations to sample. If None, a single configuration + will be sampled. + + Returns: + A SampledConfig object or a list of SampledConfig objects, depending + on the value of n. + + Raises: + ValueError: If the pipeline is not a Pipeline object or if the trials are + not a valid mapping of trial IDs to Trial objects. + """ + n_prev_trials = len(trials) + n_requested = 1 if n is None else n + + if n_prev_trials < self._population_size: + # Just do random sampling until we have enough trials. + random_pipelines = [ + resolve( + pipeline=self._pipeline, + domain_sampler=self._random_or_prior_sampler, + environment_values=self._environment_values, + ) + for _ in range(n_requested) + ] + + return _prepare_sampled_configs( + random_pipelines, n_prev_trials, n_requested == 1 + ) + + successful_trials: list[Trial] = list( + filter( + lambda trial: ( + trial.report.reported_as == trial.State.SUCCESS + if trial.report is not None + else False + ), + trials.values(), + ) + ) + + # If we have no successful trials yet, fall back to random sampling. + if len(successful_trials) == 0: + random_pipelines = [ + resolve( + pipeline=self._pipeline, + domain_sampler=self._random_or_prior_sampler, + environment_values=self._environment_values, + ) + for _ in range(n_requested) + ] + + return _prepare_sampled_configs( + random_pipelines, n_prev_trials, n_requested == 1 + ) + + return_pipelines = [] + + for _ in range(n_requested): + # Select the most recent trials to form the tournament. + # We want the last (most recent) self._population_size successful trials. 
+ latest_trials = heapq.nlargest( + self._population_size, + successful_trials, + key=lambda trial: ( + trial.metadata.time_end + if trial.metadata and isinstance(trial.metadata.time_end, float) + else 0.0 + ), + ) + + tournament_trials = [ + random.sample((latest_trials), k=1)[0] + for _ in range(min(self._tournament_size, len(latest_trials))) + ] + + # Sort the tournament by objective and pick the best as the parent. + def _obj_key(trial: Trial) -> float: + return ( + float(trial.report.objective_to_minimize) + if trial.report + and isinstance(trial.report.objective_to_minimize, float) + else float("inf") + ) + + sorted_trials = sorted(tournament_trials, key=_obj_key) + + top_trial_config = sorted_trials[0].config + + # Mutate or crossover the best trial's config to create a new config. + if self._mutation_type == "mutate_best": + mutated_incumbent = self._mutate_best(top_trial_config) + return_pipelines.append(mutated_incumbent) + elif self._mutation_type == "crossover_top_2": + crossed_over_incumbent = self._crossover_top_2(sorted_trials) + return_pipelines.append(crossed_over_incumbent) + elif isinstance(self._mutation_type, float): + if self._mutation_type < 0.0 or self._mutation_type > 1.0: + raise ValueError( + f"Invalid mutation probability: {self._mutation_type}. " + "It must be between 0.0 and 1.0." 
+ ) + rand_val = random.random() + + if rand_val < self._mutation_type: + return_pipelines.append(self._mutate_best(top_trial_config)) + else: + return_pipelines.append(self._crossover_top_2(sorted_trials)) + else: + raise ValueError(f"Invalid mutation type: {self._mutation_type}") + + return _prepare_sampled_configs(return_pipelines, n_prev_trials, n_requested == 1) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index c608a8932..dfe0357ea 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -1332,6 +1332,8 @@ class NEPSSpace(PipelineSpace): "complex_random_search", "neps_hyperband", "complex_hyperband", + "neps_regularized_evolution", + "regularized_evolution", ] CLASSIC_AND_NEPS_ALGORITHMS_NAMES = [ "random_search", @@ -1350,6 +1352,7 @@ def _get_only_neps_algorithms_functions() -> list[Callable]: algorithms.complex_random_search, algorithms.neps_hyperband, algorithms.neps_grid_search, + algorithms.neps_regularized_evolution, ] From 4c19b71711523721f4840a8b2418212ec782a2b5 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 31 Oct 2025 12:05:21 +0100 Subject: [PATCH 100/156] fix: update best score calculation to use actual best from trajectory and improve config selection logic --- neps/runtime.py | 28 ++- neps/status/status.py | 3 +- .../test_trajectory_and_metrics.py | 173 +++++++++++++++++- 3 files changed, 195 insertions(+), 9 deletions(-) rename tests/{test_neps_space => test_runtime}/test_trajectory_and_metrics.py (70%) diff --git a/neps/runtime.py b/neps/runtime.py index f0473a2e1..aa103cc83 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -604,12 +604,16 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 self.optimizer, ) + # FIX: Use the actual best score from trajectory, not state.new_score + # state.new_score may have been set to the last processed trial, not the best _best_score_so_far = float("inf") - if ( - self.state.new_score is not None - and 
self.state.new_score != _best_score_so_far - ): - _best_score_so_far = self.state.new_score + if self.state.all_best_configs: + # Get the true minimum from all best configs + _best_score_so_far = min( + config["score"] for config in self.state.all_best_configs + ) + # Update state.new_score to the actual best + self.state.new_score = _best_score_so_far optimizer_name = self.state._optimizer_info["name"] logger.info("Using optimizer: %s", optimizer_name) @@ -838,6 +842,9 @@ def load_incumbent_trace( # noqa: C901, PLR0912 best_config_path (Path): Path to the best configuration file. optimizer (AskFunction): The optimizer used for sampling configurations. """ + # Clear any existing entries to prevent duplicates + state.all_best_configs.clear() + _best_score_so_far = float("inf") metrics = { @@ -846,7 +853,16 @@ def load_incumbent_trace( # noqa: C901, PLR0912 "cumulative_cost": 0.0, "cumulative_time": 0.0, } - for evaluated_trial in previous_trials.values(): + + # FIX: Sort trials chronologically to maintain trajectory monotonicity + sorted_trials = sorted( + previous_trials.values(), + key=lambda t: ( + t.metadata.time_sampled if t.metadata.time_sampled else float("inf") + ), + ) + + for evaluated_trial in sorted_trials: if ( evaluated_trial.report is not None and evaluated_trial.report.objective_to_minimize is not None diff --git a/neps/status/status.py b/neps/status/status.py index af427d3f3..8b4ee7812 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -67,7 +67,8 @@ def _build_trace_texts(best_configs: list[dict]) -> tuple[str, str]: best_config_text = "" if best_configs: - best_config = best_configs[-1] # Latest best + # FIX: Find the actual best config by minimum score, not just the last one + best_config = min(best_configs, key=lambda c: c["score"]) best_config_text = ( "# Best config:" f"\n\n Config ID: {best_config['trial_id']}" diff --git a/tests/test_neps_space/test_trajectory_and_metrics.py b/tests/test_runtime/test_trajectory_and_metrics.py 
similarity index 70% rename from tests/test_neps_space/test_trajectory_and_metrics.py rename to tests/test_runtime/test_trajectory_and_metrics.py index 91b6de73e..b50cbd15d 100644 --- a/tests/test_neps_space/test_trajectory_and_metrics.py +++ b/tests/test_runtime/test_trajectory_and_metrics.py @@ -266,7 +266,7 @@ def test_neps_hyperband_metrics(): pipeline_space=SpaceWithFidelity(), optimizer=algorithms.neps_hyperband, root_directory=str(root_directory), - fidelities_to_spend=20, # Use fidelities_to_spend for multi-fidelity optimizers + fidelities_to_spend=20, # Use fidelities_to_spend for mf optimizers overwrite_root_directory=True, ) @@ -499,7 +499,176 @@ def test_neps_revisit_run_with_trajectory(): assert "Config ID:" in updated_trajectory assert "Objective to minimize:" in updated_trajectory - # The updated content should be at least as long (potentially with timing info added) + # The updated content should be at least as long (potentially with timing info + # added) assert len(updated_trajectory) >= len(initial_trajectory), ( "Updated trajectory should have at least the same content" ) + + +@pytest.mark.parametrize("run_number", range(10)) +def test_continue_finished_run_with_higher_budget(run_number): + """Test continuing a finished run with overwrite_root_directory=False. + + This test runs 10 times with different random seeds to catch any + non-deterministic bugs in state management. + + Uses a HIGH initial budget to likely find a very good solution, then continues + with a SMALL additional budget. This makes bugs more obvious because: + - New evaluations will likely be worse than the initial best + - If bugs exist, they'll cause the worse configs to incorrectly appear as "best" + - Tests that the incumbent is properly preserved across continuation + + Verifies: + 1. Best config file contains the actual best configuration from all evaluations + 2. Cumulative metrics in best_config.txt are correct + 3. 
The best found improves or stays the same compared to initial run + 4. Trajectory objectives are monotonically non-increasing + """ + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / f"continue_test_run_{run_number}" + + # First run - complete optimization with HIGH budget to find a good solution + initial_budget = 20 + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=initial_budget, + overwrite_root_directory=True, + ) # Verify first run completed + summary_dir = root_directory / "summary" + best_config_file = summary_dir / "best_config.txt" + trajectory_file = summary_dir / "best_config_trajectory.txt" + + assert best_config_file.exists() + assert trajectory_file.exists() + + # Read initial results + initial_best_config = best_config_file.read_text() + trajectory_file.read_text() + + # Extract initial best objective + initial_obj_match = re.search( + r"Objective to minimize: ([\d.]+)", initial_best_config + ) + assert initial_obj_match is not None + initial_best_objective = float(initial_obj_match.group(1)) + + # Extract initial cumulative evaluations + initial_cum_match = re.search( + r"Cumulative evaluations: (\d+)", initial_best_config + ) + assert initial_cum_match is not None + initial_cumulative = int(initial_cum_match.group(1)) + + # Second run - continue with SMALL additional budget + # With the high initial budget, we likely found a good solution + # New evaluations will probably be worse, exposing bugs if they exist + additional_budget = 5 + total_expected_budget = initial_budget + additional_budget + + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=SimpleSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=additional_budget, + overwrite_root_directory=False, # Continue from previous run + ) + + # Read final 
results + final_best_config = best_config_file.read_text() + final_trajectory = trajectory_file.read_text() + + # ===== Test 1: Verify best_config.txt contains a valid best configuration ===== + final_obj_match = re.search(r"Objective to minimize: ([\d.]+)", final_best_config) + assert final_obj_match is not None + final_best_objective = float(final_obj_match.group(1)) + + # The final best should be <= initial best (can only improve or stay same) + assert final_best_objective <= initial_best_objective, ( + "Best objective regressed when continuing run: " + f"initial={initial_best_objective}, final={final_best_objective}" + ) + + # ===== Test 2: Verify cumulative metrics are correct ===== + # Extract cumulative evaluations from best_config.txt + final_cum_match = re.search(r"Cumulative evaluations: (\d+)", final_best_config) + assert final_cum_match is not None, "Should have cumulative evaluations" + final_cumulative_evals = int(final_cum_match.group(1)) + + # The cumulative evaluations should be >= initial cumulative + # (we added more evaluations in the second run) + assert final_cumulative_evals >= initial_cumulative, ( + f"Final cumulative evaluations {final_cumulative_evals} should be >= " + f"initial cumulative {initial_cumulative}" + ) + + # The cumulative evaluations should be <= total budget + # (may be less if some evaluations failed or optimizer stopped early) + assert final_cumulative_evals <= total_expected_budget, ( + f"Cumulative evaluations {final_cumulative_evals} should not exceed " + f"total budget {total_expected_budget}" + ) + + # ===== Test 3: Verify trajectory contains the improvement history ===== + trajectory_objectives = re.findall( + r"Objective to minimize: ([\d.]+)", final_trajectory + ) + assert len(trajectory_objectives) > 0, "Should have trajectory objectives" + + # Convert to floats + objectives = [float(obj) for obj in trajectory_objectives] + + # The minimum objective in trajectory should be the true best across all runs + 
min_trajectory_objective = min(objectives) + + # The best_config.txt should show the minimum objective from the trajectory + assert abs(final_best_objective - min_trajectory_objective) < 1e-10, ( + f"best_config.txt shows {final_best_objective} but " + f"trajectory minimum is {min_trajectory_objective}" + ) + + # ===== Test 4: Verify trajectory is monotonically non-increasing ===== + # The trajectory should be monotonically non-increasing (each entry should be + # better than or equal to the previous) + is_monotonic = all( + objectives[i] <= objectives[i - 1] for i in range(1, len(objectives)) + ) + + assert is_monotonic, ( + f"Trajectory is not monotonically non-increasing: {objectives}" + ) + + # ===== Test 5: Verify the config in best_config.txt is valid ===== + # The format is "Config: {dict}" so we look for that pattern + assert "Config:" in final_best_config, "Should have config section" + + # Should contain parameter values (x and y, possibly with SAMPLING__ prefix) + assert any( + param in final_best_config + for param in ["x", "y", "'x'", "'y'", "SAMPLING__Resolvable"] + ), "Config should contain parameter values" + + # ===== Test 6: Verify the objective value is in the valid range ===== + assert 1.0 <= final_best_objective <= 11.0, ( + f"Best objective {final_best_objective} should be in range [1.0, 11.0]" + ) + + # ===== Test 7: Verify trajectory file format is correct ===== + assert ( + "Best configs and their objectives across evaluations:" in final_trajectory + ), "Should have trajectory header" + + assert "Config ID:" in final_trajectory, "Should have config IDs" + assert ( + "--------------------------------------------------------------------------------" + in final_trajectory + ), "Should have separator lines" + + # ===== Test 8: Verify we actually did more evaluations ===== + # Count config entries in trajectory (though trajectory only shows improvements) + config_count = final_trajectory.count("Config ID:") + assert config_count >= 1, "Should 
have at least one config entry" From d74f551078ab7087dc21a4df9b5adf97e3039309 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 31 Oct 2025 17:06:15 +0100 Subject: [PATCH 101/156] fix: update docstring for NePSRegularizedEvolution to clarify functionality and parameters --- neps/optimizers/neps_regularized_evolution.py | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/neps/optimizers/neps_regularized_evolution.py b/neps/optimizers/neps_regularized_evolution.py index 42f926bb1..e49d94ac3 100644 --- a/neps/optimizers/neps_regularized_evolution.py +++ b/neps/optimizers/neps_regularized_evolution.py @@ -32,15 +32,27 @@ @dataclass class NePSRegularizedEvolution: - """A complex random search optimizer for a NePS pipeline. - It samples configurations randomly from the pipeline's domain and environment values, - and also performs mutations and crossovers based on previous successful trials. + """A Regularized Evolution optimizer for a NePS pipeline. + It samples configurations based on mutations and crossovers of previous successful + trials, using a tournament selection mechanism. Args: - pipeline: The pipeline to optimize, which should be a Pipeline object. - - Raises: - ValueError: If the pipeline is not a Pipeline object. + pipeline: The pipeline to optimize. + population_size: The size of the population for evolution. + tournament_size: The size of the tournament for selecting parents. + use_priors: Whether to use priors when sampling if available. + mutation_type: The type of mutation to use (e.g., "mutate_best", + "crossover_top_2"). If a float is provided, it is interpreted as the + probability of choosing mutation, compared to the probability of crossover. + n_mutations: The number of mutations to apply. A fixed integer, "random" for + a random number between 1 and half the parameters, or "half" to mutate + half the parameters. + n_forgets: The number of parameters to forget. 
A fixed integer, "random" for + a random number between 1 and half the parameters, or "half" to forget + half the parameters. + ignore_fidelity: Whether to ignore fidelity when sampling. If set to "highest + fidelity", the highest fidelity values will be used. If True, fidelity + values will be sampled randomly. """ def __init__( @@ -103,9 +115,9 @@ def __init__( # By default we don't support fidelities unless explicitly requested. else: raise ValueError( - "ComplexRandomSearch does not support fidelities by default. Consider" - " using a different optimizer or setting `ignore_fidelity=True` or" - " `highest fidelity`." + "RegularizedEvolution does not support fidelities by default. " + "Consider using a different optimizer or setting " + "`ignore_fidelity=True` or `highest fidelity`." ) self._random_or_prior_sampler: RandomSampler | PriorOrFallbackSampler = ( From 9cd62796f4bb25cd34df95e3f60108d4cf93e7d1 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 2 Nov 2025 22:36:28 +0100 Subject: [PATCH 102/156] Refactor parameter definitions in PipelineSpace classes to use 'lower' and 'upper' instead of 'min_value' and 'max_value' Introduce import_trials for neps_bracket_optimizer and change import logic to accomodate for NePS-spaces. - Updated parameter definitions in various test files to replace 'min_value' and 'max_value' with 'lower' and 'upper' for Float and Integer types. - Added a new test script for normalization functionality in PipelineSpace. - Ensured that all tests reflect the new parameter naming conventions. - Verified that extra keys are removed during normalization and that expected keys are present in the normalized configuration. 
--- docs/getting_started.md | 10 +- docs/index.md | 4 +- docs/reference/neps_spaces.md | 16 +- neps/api.py | 64 +- neps/normalization.py | 69 +- neps/optimizers/__init__.py | 3 + neps/optimizers/algorithms.py | 11 +- neps/optimizers/bracket_optimizer.py | 2 +- neps/optimizers/neps_bracket_optimizer.py | 73 +- neps/optimizers/neps_priorband.py | 4 +- neps/optimizers/neps_random_search.py | 20 +- neps/optimizers/utils/grid.py | 6 +- neps/runtime.py | 3 + neps/space/neps_spaces/neps_space.py | 87 +- neps/space/neps_spaces/parameters.py | 212 ++- neps/space/neps_spaces/sampling.py | 12 +- neps/space/parameters.py | 16 - neps/state/neps_state.py | 9 +- neps/utils/trial_io.py | 6 +- neps/validation.py | 162 +- .../basic_usage/example_import_trials.py | 276 ++-- neps_examples/basic_usage/hyperparameters.py | 10 +- .../convenience/logging_additional_info.py | 8 +- .../convenience/neps_tblogger_tutorial.py | 4 +- .../convenience/running_on_slurm_scripts.py | 2 +- .../working_directory_per_pipeline.py | 4 +- .../expert_priors_for_hyperparameters.py | 8 +- neps_examples/efficiency/multi_fidelity.py | 6 +- .../multi_fidelity_and_expert_priors.py | 14 +- .../efficiency/pytorch_lightning_ddp.py | 8 +- .../efficiency/pytorch_lightning_fsdp.py | 8 +- .../efficiency/pytorch_native_ddp.py | 14 +- .../efficiency/pytorch_native_fsdp.py | 4 +- .../test_files/algo_comparisons.ipynb | 161 ++ neps_examples/test_files/algo_tests.ipynb | 1403 +++++++++++++++++ neps_examples/test_files/priors_test.ipynb | 400 +++++ test_normalization_fix.py | 38 + .../test_basic_functionality.py | 4 +- .../test_neps_space/test_domain__centering.py | 96 +- .../test_neps_space/test_neps_integration.py | 64 +- ...st_neps_integration_priorband__max_cost.py | 16 +- ...t_neps_integration_priorband__max_evals.py | 16 +- .../test_pipeline_space_methods.py | 36 +- .../test_search_space__fidelity.py | 12 +- .../test_search_space__hnas_like.py | 2 +- .../test_search_space__recursion.py | 2 +- 
.../test_search_space__resampled.py | 6 +- .../test_search_space__reuse_arch_elements.py | 8 +- ...test_space_conversion_and_compatibility.py | 22 +- .../test_trajectory_and_metrics.py | 10 +- 50 files changed, 2911 insertions(+), 540 deletions(-) create mode 100644 neps_examples/test_files/algo_comparisons.ipynb create mode 100644 neps_examples/test_files/algo_tests.ipynb create mode 100644 neps_examples/test_files/priors_test.ipynb create mode 100644 test_normalization_fix.py diff --git a/docs/getting_started.md b/docs/getting_started.md index eb49ecf48..749deaba7 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -18,12 +18,12 @@ pip install neural-pipeline-search ```python class ExampleSpace(neps.PipelineSpace): # Define the parameters of your search space - some_parameter = neps.Float(min_value=0.0, max_value=1.0) # float - another_parameter = neps.Integer(min_value=0, max_value=10) # integer + some_parameter = neps.Float(lower=0.0, upper=1.0) # float + another_parameter = neps.Integer(lower=0, upper=10) # integer optimizer = neps.Categorical(choices=("sgd", "adam")) # categorical - epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=100)) - learning_rate = neps.Float(min_value=1e-5, max_value=1, log=True) - alpha = neps.Float(min_value=0.1, max_value=1.0, prior=0.99, prior_confidence="high") + epoch = neps.Fidelity(neps.Integer(lower=1, upper=100)) + learning_rate = neps.Float(lower=1e-5, upper=1, log=True) + alpha = neps.Float(lower=0.1, upper=1.0, prior=0.99, prior_confidence="high") ``` 2. **Define an `evaluate_pipeline()` function**: diff --git a/docs/index.md b/docs/index.md index b8deea666..aa4ad38dc 100644 --- a/docs/index.md +++ b/docs/index.md @@ -70,8 +70,8 @@ def evaluate_pipeline(hyperparameter_a: float, hyperparameter_b: int, architectu # 2. 
Define a search space of parameters; use the same parameter names as in evaluate_pipeline class ExampleSpace(neps.PipelineSpace): - hyperparameter_a = neps.Float(min_value=0.001, max_value=0.1, log=True) # Log scale parameter - hyperparameter_b = neps.Integer(min_value=1, max_value=42) + hyperparameter_a = neps.Float(lower=0.001, upper=0.1, log=True) # Log scale parameter + hyperparameter_b = neps.Integer(lower=1, upper=42) architecture_parameter = neps.Categorical(choices=("option_a", "option_b")) # 3. Run the NePS optimization diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 5b0f2cf31..323dd5526 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -18,8 +18,8 @@ A **NePS space** is defined as a subclass of [`PipelineSpace`][neps.space.neps_s import neps class MySpace(neps.PipelineSpace): - float_param = neps.Float(min_value=0.1, max_value=1.0) - int_param = neps.Integer(min_value=1, max_value=10) + float_param = neps.Float(lower=0.1, upper=1.0) + int_param = neps.Integer(lower=1, upper=10) cat_param = neps.Categorical(choices=("A", "B", "C")) ``` @@ -64,9 +64,9 @@ For more details on how to use priors, see the [Priors](../reference/search_algo ```python space = MySpace() # Adding a new parameter, this will appear as param_n where n is the next available index - space = space + neps.Float(min_value=0.01, max_value=0.1) + space = space + neps.Float(lower=0.01, upper=0.1) # Or using the add() method, this allows you to specify a name - space = space.add(neps.Integer(min_value=5, max_value=15), name="new_int_param") + space = space.add(neps.Integer(lower=5, upper=15), name="new_int_param") # Removing a parameter by its name space = space.remove("cat_param") ``` @@ -103,7 +103,7 @@ Operation also allow for (keyword-)arguments to be defined, including other para batch_size = neps.Categorical(choices=(16, 32, 64)) - _layer_size = neps.Integer(min_value=80, max_value=100) + _layer_size = neps.Integer(lower=80, 
upper=100) hidden_layer = neps.Operation( operator=torch.nn.Linear, @@ -156,7 +156,7 @@ With `neps.Resampled` you can reuse a parameter, even themselves recursively, bu ```python class ResampledSpace(neps.PipelineSpace): - float_param = neps.Float(min_value=0, max_value=1) + float_param = neps.Float(lower=0, upper=1) # The resampled parameter will have the same range but will be sampled # independently, so it can take a different value than its source @@ -167,7 +167,7 @@ This is especially useful for defining complex architectures, where e.g. a cell ```python class CNN_Space(neps.PipelineSpace): - _kernel_size = neps.Integer(min_value=5, max_value=8) + _kernel_size = neps.Integer(lower=5, upper=8) # Define a cell block that can be resampled # It will resample a new kernel size from _kernel_size each time @@ -205,7 +205,7 @@ def evaluate_pipeline(cnn: torch.nn.Module): ) # This results in a (possibly infinite) tuple of independently sampled future_params - future_param = Float(min_value=0, max_value=5) + future_param = Float(lower=0, upper=5) ``` !!! 
tip "Complex structural spaces" diff --git a/neps/api.py b/neps/api.py index b7155c3b9..a5ad10471 100644 --- a/neps/api.py +++ b/neps/api.py @@ -3,6 +3,7 @@ from __future__ import annotations import logging +import shutil import warnings from collections.abc import Callable, Mapping, Sequence from pathlib import Path @@ -91,14 +92,14 @@ class MySpace(PipelineSpace): ("adam", "sgd", "rmsprop") ) learning_rate = neps.Float( # log spaced float - min_value=1e-5, max_value=1, log=True + lower=1e-5, upper=1, log=True ) epochs = neps.Fidelity( # fidelity integer neps.Integer(1, 100) ) batch_size = neps.Integer( # integer with a prior - min_value=32, - max_value=512, + lower=32, + upper=512, prior=128, prior_confidence="medium" ) @@ -148,14 +149,14 @@ class MySpace(PipelineSpace): ("adam", "sgd", "rmsprop") ) learning_rate = neps.Float( # log spaced float - min_value=1e-5, max_value=1, log=True + lower=1e-5, upper=1, log=True ) epochs = neps.Fidelity( # fidelity integer neps.Integer(1, 100) ) batch_size = neps.Integer( # integer with a prior - min_value=32, - max_value=512, + lower=32, + upper=512, prior=128, prior_confidence="medium" ) @@ -401,7 +402,7 @@ def __call__( if isinstance(space, PipelineSpace) and neps_classic_space_compatibility == "classic": raise ValueError( "The provided optimizer is not compatible with this complex search space. " - "Please use one that is, such as 'random_search', 'hyperband'" + "Please use one that is, such as 'random_search', 'hyperband', " "'priorband', or 'complex_random_search'." ) @@ -482,7 +483,7 @@ def __call__( summary_dir = root_directory / "summary" if write_summary_to_disk: logger.info( - "The summary folder has been created, which contains csv and txt files with" + "The summary folder has been created, which contains csv and txt files with " "the output of all data in the run (short.csv - only the best; full.csv - " "all runs; best_config_trajectory.txt for incumbent trajectory; and " "best_config.txt for final incumbent)." 
@@ -530,9 +531,10 @@ def save_pipeline_results( def import_trials( - pipeline_space: SearchSpace, + pipeline_space: SearchSpace | PipelineSpace, evaluated_trials: Sequence[tuple[Mapping[str, Any], UserResultDict],], root_directory: Path | str, + overwrite_root_directory: bool = False, # noqa: FBT001, FBT002 optimizer: ( OptimizerChoice | Mapping[str, Any] @@ -555,6 +557,8 @@ def import_trials( A sequence of tuples, each containing a configuration dictionary and its corresponding result. root_directory (Path or str): The root directory of the NePS run. + overwrite_root_directory (bool, optional): If True, overwrite the existing + root directory. Defaults to False. optimizer: The optimizer to use for importing trials. Can be a string, mapping, tuple, callable, or CustomOptimizer. Defaults to "auto". @@ -580,7 +584,37 @@ def import_trials( if isinstance(root_directory, str): root_directory = Path(root_directory) - optimizer_ask, optimizer_info = load_optimizer(optimizer, pipeline_space) + neps_classic_space_compatibility = check_neps_space_compatibility(optimizer) + if neps_classic_space_compatibility in ["both", "classic"] and isinstance( + pipeline_space, PipelineSpace + ): + converted_space = convert_neps_to_classic_search_space(pipeline_space) + if converted_space: + pipeline_space = converted_space + space = convert_to_space(pipeline_space) + + if neps_classic_space_compatibility == "neps" and not isinstance( + space, PipelineSpace + ): + space = convert_classic_to_neps_search_space(space) + + # Optimizer check, if the search space is a Pipeline and the optimizer is not a NEPS + # algorithm, we raise an error, as the optimizer is not compatible. + if isinstance(space, PipelineSpace) and neps_classic_space_compatibility == "classic": + raise ValueError( + "The provided optimizer is not compatible with this complex search space. " + "Please use one that is, such as 'random_search', 'hyperband', " + "'priorband', or 'complex_random_search'." 
+ ) + + optimizer_ask, optimizer_info = load_optimizer(optimizer, space) + + if overwrite_root_directory and root_directory.exists(): + logger.info( + f"Overwriting root directory '{root_directory}' as" + " `overwrite_root_directory=True`." + ) + shutil.rmtree(root_directory) state = NePSState.create_or_load( root_directory, @@ -592,9 +626,9 @@ def import_trials( normalized_trials = [] for config, result in evaluated_trials: - _validate_imported_config(pipeline_space, config) + _validate_imported_config(space, config) _validate_imported_result(result) - normalized_config = _normalize_imported_config(pipeline_space, config) + normalized_config = _normalize_imported_config(space, config) normalized_trials.append((normalized_config, result)) with state._trial_lock.lock(): @@ -603,11 +637,17 @@ def import_trials( existing_configs = [ tuple(sorted(t.config.items())) for t in state_trials.values() ] + num_before_dedup = len(normalized_trials) normalized_trials = [ t for t in normalized_trials if tuple(sorted(t[0].items())) not in existing_configs ] + num_duplicates = num_before_dedup - len(normalized_trials) + if num_duplicates > 0: + logger.info( + f"Skipped {num_duplicates} duplicate trial(s) (already exist in state)." 
+ ) imported_trials = optimizer_ask.import_trials( external_evaluations=normalized_trials, diff --git a/neps/normalization.py b/neps/normalization.py index 1e07ba9e5..9d827c413 100644 --- a/neps/normalization.py +++ b/neps/normalization.py @@ -6,13 +6,17 @@ from collections.abc import Mapping from typing import TYPE_CHECKING +from neps.space import SearchSpace + if TYPE_CHECKING: - from neps.space import SearchSpace + from neps.space.neps_spaces.parameters import PipelineSpace logger = logging.getLogger(__name__) -def _normalize_imported_config(space: SearchSpace, config: Mapping[str, float]) -> dict: +def _normalize_imported_config( # noqa: C901, PLR0912 + space: SearchSpace | PipelineSpace, config: Mapping[str, float] +) -> dict: """Completes a configuration by adding default values for missing fidelities. Args: @@ -22,16 +26,71 @@ def _normalize_imported_config(space: SearchSpace, config: Mapping[str, float]) Returns: A new, completed configuration dictionary. """ - all_param_keys = set(space.searchables.keys()) | set(space.fidelities.keys()) + if isinstance(space, SearchSpace): + all_param_keys = set(space.searchables.keys()) | set(space.fidelities.keys()) + else: + # For PipelineSpace, we need to generate the prefixed keys + # Import here to avoid circular import + from neps.space.neps_spaces.neps_space import ( + NepsCompatConverter, + construct_sampling_path, + ) + from neps.space.neps_spaces.parameters import Domain + + all_param_keys = set() + + # Add SAMPLING__ prefixed keys for each parameter + for param_name, param_obj in space.get_attrs().items(): + # Construct the sampling path for this parameter + if isinstance(param_obj, Domain): + sampling_path = construct_sampling_path( + path_parts=["Resolvable", param_name], + domain_obj=param_obj, + ) + all_param_keys.add( + f"{NepsCompatConverter._SAMPLING_PREFIX}{sampling_path}" + ) + + # Add ENVIRONMENT__ prefixed keys for fidelities + for fidelity_name in space.fidelity_attrs: + all_param_keys.add( + 
f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" + ) # copy to avoid modifying the original config normalized_conf = dict(config) - for key, param in space.fidelities.items(): + fidelities = ( + space.fidelities if isinstance(space, SearchSpace) else space.fidelity_attrs + ) + for key, param in fidelities.items(): if key not in normalized_conf: normalized_conf[key] = param.upper - extra_keys = set(normalized_conf.keys()) - all_param_keys + if isinstance(space, SearchSpace): + extra_keys = set(normalized_conf.keys()) - all_param_keys + else: + # For PipelineSpace, filter out keys that match the expected patterns + # Import here to avoid circular import (needed for prefix constants) + from neps.space.neps_spaces.neps_space import NepsCompatConverter + + extra_keys = set() + for key in normalized_conf: + if not ( + key.startswith( + ( + NepsCompatConverter._SAMPLING_PREFIX, + NepsCompatConverter._ENVIRONMENT_PREFIX, + ) + ) + ): + # Check if it's a plain parameter name (without prefix) + if key not in space.get_attrs() and key not in space.fidelity_attrs: + extra_keys.add(key) + elif key not in all_param_keys: + # It has a prefix but doesn't match expected sampling/environment keys + extra_keys.add(key) + if extra_keys: logger.warning(f"Unknown parameters in config: {extra_keys}, discarding them") for k in extra_keys: diff --git a/neps/optimizers/__init__.py b/neps/optimizers/__init__.py index e161c1f61..29b07baad 100644 --- a/neps/optimizers/__init__.py +++ b/neps/optimizers/__init__.py @@ -39,6 +39,9 @@ def _load_optimizer_from_string( keywords = extract_keyword_defaults(optimizer_build) optimizer_kwargs = optimizer_kwargs or {} + optimizer_kwargs = dict(optimizer_kwargs) # Make mutable copy + if _optimizer == "primo": + optimizer_kwargs["prior_centers"] = optimizer_kwargs.get("prior_centers", {}) opt = optimizer_build(space, **optimizer_kwargs) # type: ignore info = OptimizerInfo(name=_optimizer, info={**keywords, **optimizer_kwargs}) return opt, info 
diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index b434aeb12..e9dad13ab 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -1676,7 +1676,7 @@ def _neps_bracket_optimizer( case "successive_halving": assert early_stopping_rate is not None rung_to_fidelity, rung_sizes = brackets.calculate_sh_rungs( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + bounds=(fidelity_obj.lower, fidelity_obj.upper), eta=eta, early_stopping_rate=early_stopping_rate, ) @@ -1688,7 +1688,7 @@ def _neps_bracket_optimizer( case "hyperband": assert early_stopping_rate is None rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + bounds=(fidelity_obj.lower, fidelity_obj.upper), eta=eta, ) create_brackets = partial( @@ -1699,7 +1699,7 @@ def _neps_bracket_optimizer( case "asha": assert early_stopping_rate is not None rung_to_fidelity, _rung_sizes = brackets.calculate_sh_rungs( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + bounds=(fidelity_obj.lower, fidelity_obj.upper), eta=eta, early_stopping_rate=early_stopping_rate, ) @@ -1712,7 +1712,7 @@ def _neps_bracket_optimizer( case "async_hb": assert early_stopping_rate is None rung_to_fidelity, bracket_layouts = brackets.calculate_hb_bracket_layouts( - bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + bounds=(fidelity_obj.lower, fidelity_obj.upper), eta=eta, ) # We don't care about the capacity of each bracket, we need the rung layout @@ -1734,7 +1734,7 @@ def _neps_bracket_optimizer( early_stopping_rate=( early_stopping_rate if early_stopping_rate is not None else 0 ), - fid_bounds=(fidelity_obj.min_value, fidelity_obj.max_value), + fid_bounds=(fidelity_obj.lower, fidelity_obj.upper), inc_ratio=inc_ratio, ) case "uniform": @@ -1753,6 +1753,7 @@ def _neps_bracket_optimizer( sampler=_sampler, sample_prior_first=sample_prior_first, create_brackets=create_brackets, + 
fid_name=fidelity_name, ) diff --git a/neps/optimizers/bracket_optimizer.py b/neps/optimizers/bracket_optimizer.py index b3f9f8461..793d0a421 100644 --- a/neps/optimizers/bracket_optimizer.py +++ b/neps/optimizers/bracket_optimizer.py @@ -421,7 +421,7 @@ def import_trials( f"{list(rung_to_fid.values())}. Skipping config: {config}" ) continue - # create a unique key for the config without the fidelity + # create a unique key for the config without the fidelity config_key = get_trial_config_unique_key( config=config, fid_name=self.fid_name ) diff --git a/neps/optimizers/neps_bracket_optimizer.py b/neps/optimizers/neps_bracket_optimizer.py index f48d17d1f..654426905 100644 --- a/neps/optimizers/neps_bracket_optimizer.py +++ b/neps/optimizers/neps_bracket_optimizer.py @@ -7,6 +7,7 @@ from __future__ import annotations +import copy import logging from collections.abc import Callable, Mapping, Sequence from dataclasses import dataclass @@ -16,8 +17,12 @@ import neps.optimizers.bracket_optimizer as standard_bracket_optimizer from neps.optimizers.neps_priorband import NePSPriorBandSampler -from neps.optimizers.optimizer import SampledConfig +from neps.optimizers.optimizer import ImportedConfig, SampledConfig from neps.optimizers.utils.brackets import PromoteAction, SampleAction +from neps.optimizers.utils.util import ( + get_config_key_to_id_mapping, + get_trial_config_unique_key, +) from neps.space.neps_spaces import neps_space from neps.space.neps_spaces.sampling import ( DomainSampler, @@ -30,6 +35,7 @@ from neps.optimizers.utils.brackets import Bracket from neps.space.neps_spaces.parameters import PipelineSpace from neps.state.optimizer import BudgetInfo + from neps.state.pipeline_eval import UserResultDict from neps.state.trial import Trial @@ -61,6 +67,9 @@ class _NePSBracketOptimizer: """The sampler used to generate new trials.""" sampler: NePSPriorBandSampler | DomainSampler + """The name of the fidelity in the space.""" + fid_name: str + def __call__( # noqa: 
C901, PLR0912 self, trials: Mapping[str, Trial], @@ -174,9 +183,9 @@ def _sample_prior( _fidelity_attrs = self.space.fidelity_attrs for fidelity_name, fidelity_obj in _fidelity_attrs.items(): if fidelity_level == "max": - _environment_values[fidelity_name] = fidelity_obj.max_value + _environment_values[fidelity_name] = fidelity_obj.upper elif fidelity_level == "min": - _environment_values[fidelity_name] = fidelity_obj.min_value + _environment_values[fidelity_name] = fidelity_obj.lower else: raise ValueError(f"Invalid fidelity level {fidelity_level}") @@ -212,3 +221,61 @@ def _convert_to_another_rung( config = neps_space.NepsCompatConverter.to_neps_config(resolution_context) return dict(**config) + + def import_trials( + self, + external_evaluations: Sequence[tuple[Mapping[str, Any], UserResultDict]], + trials: Mapping[str, Trial], + ) -> list[ImportedConfig]: + rung_to_fid = self.rung_to_fid + + # Use trials_to_table to get all used config IDs + table = standard_bracket_optimizer.trials_to_table(trials) + used_ids = set(table.index.get_level_values("id").tolist()) + + imported_configs = [] + config_to_id = get_config_key_to_id_mapping(table=table, fid_name=self.fid_name) + + for config, result in external_evaluations: + fid_value = config[self.fid_name] + if fid_value not in rung_to_fid.values(): + logger.warning( + f"Fidelity value {fid_value} not in known rung fidelities " + f"{list(rung_to_fid.values())}. 
Skipping config: {config}" + ) + continue + # create a unique key for the config without the fidelity + config_key = get_trial_config_unique_key( + config=config, fid_name=self.fid_name + ) + # Assign id if not already assigned + if config_key not in config_to_id: + next_id = max(used_ids, default=0) + 1 + config_to_id[config_key] = next_id + used_ids.add(next_id) + else: + existing_id = config_to_id[config_key] + # check if the other config with same key has the same fidelity + try: + existing_config = table.xs(existing_id, level="id")["config"].iloc[0] + if existing_config[self.fid_name] == config[self.fid_name]: + logger.warning( + f"Duplicate configuration with same fidelity found: {config}" + ) + continue + except KeyError: + pass + + config_id = config_to_id[config_key] + + # Find the rung corresponding to the fidelity value in config + rung = next((r for r, f in rung_to_fid.items() if f == fid_value), None) + trial_id = f"{config_id}_rung_{rung}" + imported_configs.append( + ImportedConfig( + id=trial_id, + config=copy.deepcopy(config), + result=copy.deepcopy(result), + ) + ) + return imported_configs diff --git a/neps/optimizers/neps_priorband.py b/neps/optimizers/neps_priorband.py index bd90a49ee..86c824570 100644 --- a/neps/optimizers/neps_priorband.py +++ b/neps/optimizers/neps_priorband.py @@ -170,7 +170,7 @@ def _sample_prior(self) -> dict[str, Any]: _environment_values = {} _fidelity_attrs = self.space.fidelity_attrs for fidelity_name, fidelity_obj in _fidelity_attrs.items(): - _environment_values[fidelity_name] = fidelity_obj.max_value + _environment_values[fidelity_name] = fidelity_obj.upper _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, @@ -185,7 +185,7 @@ def _sample_random(self) -> dict[str, Any]: _environment_values = {} _fidelity_attrs = self.space.fidelity_attrs for fidelity_name, fidelity_obj in _fidelity_attrs.items(): - _environment_values[fidelity_name] = fidelity_obj.max_value + 
_environment_values[fidelity_name] = fidelity_obj.upper _resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, diff --git a/neps/optimizers/neps_random_search.py b/neps/optimizers/neps_random_search.py index db0678659..cb9939994 100644 --- a/neps/optimizers/neps_random_search.py +++ b/neps/optimizers/neps_random_search.py @@ -61,7 +61,7 @@ def __init__( fidelity_attrs = self._pipeline.fidelity_attrs for fidelity_name, fidelity_obj in fidelity_attrs.items(): if ignore_fidelity == "highest fidelity": - self._environment_values[fidelity_name] = fidelity_obj.max_value + self._environment_values[fidelity_name] = fidelity_obj.upper elif not ignore_fidelity: raise ValueError( "RandomSearch does not support fidelities by default. Consider using" @@ -70,14 +70,14 @@ def __init__( ) # Sample randomly from the fidelity bounds. elif isinstance(fidelity_obj._domain, Integer): - assert isinstance(fidelity_obj.min_value, int) - assert isinstance(fidelity_obj.max_value, int) + assert isinstance(fidelity_obj.lower, int) + assert isinstance(fidelity_obj.upper, int) self._environment_values[fidelity_name] = random.randint( - fidelity_obj.min_value, fidelity_obj.max_value + fidelity_obj.lower, fidelity_obj.upper ) elif isinstance(fidelity_obj._domain, Float): self._environment_values[fidelity_name] = random.uniform( - fidelity_obj.min_value, fidelity_obj.max_value + fidelity_obj.lower, fidelity_obj.upper ) self._random_sampler = RandomSampler(predefined_samplings={}) @@ -168,7 +168,7 @@ def __init__( fidelity_attrs = self._pipeline.fidelity_attrs for fidelity_name, fidelity_obj in fidelity_attrs.items(): if ignore_fidelity == "highest fidelity": - self._environment_values[fidelity_name] = fidelity_obj.max_value + self._environment_values[fidelity_name] = fidelity_obj.upper elif not ignore_fidelity: raise ValueError( "ComplexRandomSearch does not support fidelities by default. 
Consider" @@ -177,14 +177,14 @@ def __init__( ) # Sample randomly from the fidelity bounds. elif isinstance(fidelity_obj._domain, Integer): - assert isinstance(fidelity_obj.min_value, int) - assert isinstance(fidelity_obj.max_value, int) + assert isinstance(fidelity_obj.lower, int) + assert isinstance(fidelity_obj.upper, int) self._environment_values[fidelity_name] = random.randint( - fidelity_obj.min_value, fidelity_obj.max_value + fidelity_obj.lower, fidelity_obj.upper ) elif isinstance(fidelity_obj._domain, Float): self._environment_values[fidelity_name] = random.uniform( - fidelity_obj.min_value, fidelity_obj.max_value + fidelity_obj.lower, fidelity_obj.upper ) self._random_sampler = RandomSampler( diff --git a/neps/optimizers/utils/grid.py b/neps/optimizers/utils/grid.py index e3b95f98a..96730a67b 100644 --- a/neps/optimizers/utils/grid.py +++ b/neps/optimizers/utils/grid.py @@ -89,10 +89,10 @@ def make_grid( # noqa: PLR0912, PLR0915, C901 ) elif isinstance(hp, Fidelity): if ignore_fidelity == "highest fidelity": # type: ignore[unreachable] - fid_ranges[name] = [hp.max_value] + fid_ranges[name] = [hp.upper] continue if ignore_fidelity is True: - fid_ranges[name] = [hp.min_value, hp.max_value] + fid_ranges[name] = [hp.lower, hp.upper] continue raise ValueError( "Grid search does not support fidelity natively." 
@@ -101,7 +101,7 @@ def make_grid( # noqa: PLR0912, PLR0915, C901 elif isinstance(hp, Integer | Float): steps = size_per_numerical_hp # type: ignore[unreachable] xs = torch.linspace(0, 1, steps=steps) - numeric_values = xs * (hp.max_value - hp.min_value) + hp.min_value + numeric_values = xs * (hp.upper - hp.lower) + hp.lower if isinstance(hp, Integer): numeric_values = torch.round(numeric_values) uniq_values = torch.unique(numeric_values).tolist() diff --git a/neps/runtime.py b/neps/runtime.py index aa103cc83..194b4d5f2 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -905,6 +905,9 @@ def load_incumbent_trace( # noqa: C901, PLR0912 fidelity_name ] + if isinstance(evaluated_trial.report.objective_to_minimize, list): + # Skip list objectives for now in incumbent trace + continue state.new_score = evaluated_trial.report.objective_to_minimize if state.new_score is not None and state.new_score < _best_score_so_far: _best_score_so_far = state.new_score diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index c608a8932..1f1fe01ae 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -41,6 +41,52 @@ P = TypeVar("P", bound="PipelineSpace") +def construct_sampling_path( + path_parts: list[str], + domain_obj: Domain, +) -> str: + """Construct a sampling path for a domain object. + + The sampling path uniquely identifies a sampled value in the resolution context. + It consists of the hierarchical path through the pipeline space and a domain + identifier that includes type and range information. + + Args: + path_parts: The hierarchical path parts (e.g., ["Resolvable", "integer1"]). + domain_obj: The domain object for which to construct the path. + + Returns: + A string representing the full sampling path in the format: + "::__" + Example: "Resolvable.integer1::integer__0_1_False" + + Raises: + ValueError: If path_parts is empty or domain_obj is not a Domain. 
+ """ + if not path_parts: + raise ValueError("path_parts cannot be empty") + if not isinstance(domain_obj, Domain): + raise ValueError(f"domain_obj must be a Domain, got {type(domain_obj)}") + + # Get the domain type name (e.g., "integer", "float", "categorical") + domain_obj_type_name = type(domain_obj).__name__.lower() + + # Get the range compatibility identifier (e.g., "0_1_False" for + # Integer(0, 1, log=False)) + range_compatibility_identifier = domain_obj.range_compatibility_identifier + + # Combine type and range: "integer__0_1_False" + domain_obj_identifier = f"{domain_obj_type_name}__{range_compatibility_identifier}" + + # Join path parts with dots: "Resolvable.integer1" + current_path = ".".join(path_parts) + + # Append domain identifier: "Resolvable.integer1::integer__0_1_False" + current_path += "::" + domain_obj_identifier + + return current_path + + class SamplingResolutionContext: """A context for resolving samplings in a NePS space. It manages the resolution root, domain sampler, environment values, @@ -251,17 +297,12 @@ def sample_from(self, domain_obj: Domain) -> Any: f" {domain_obj!r}." + "\nThis should not be happening." ) - # The range compatibility identifier is there to make sure when we say - # the path matches, that the range for the value we are looking up also matches. - domain_obj_type_name = type(domain_obj).__name__.lower() - range_compatibility_identifier = domain_obj.range_compatibility_identifier - domain_obj_identifier = ( - f"{domain_obj_type_name}__{range_compatibility_identifier}" + # Construct the unique sampling path for this domain object + current_path = construct_sampling_path( + path_parts=self._current_path_parts, + domain_obj=domain_obj, ) - current_path = ".".join(self._current_path_parts) - current_path += "::" + domain_obj_identifier - if current_path in self._samplings_made: # We have already sampled a value for this path. This should not happen. 
# Every time we sample a domain, it should have its own different path. @@ -640,11 +681,11 @@ def _( f" {fidelity_name!r}." ) from err - if not fidelity_obj.min_value <= result <= fidelity_obj.max_value: + if not fidelity_obj.lower <= result <= fidelity_obj.upper: raise ValueError( f"Value for fidelity with name {fidelity_name!r} is outside its allowed" " range " - + f"[{fidelity_obj.min_value!r}, {fidelity_obj.max_value!r}]. " + + f"[{fidelity_obj.lower!r}, {fidelity_obj.upper!r}]. " + f"Received: {result!r}." ) @@ -1208,8 +1249,8 @@ def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | ) elif isinstance(value, Integer): classic_space[key] = neps.HPOInteger( - lower=value.min_value, - upper=value.max_value, + lower=value.lower, + upper=value.upper, log=value._log if hasattr(value, "_log") else False, prior=value.prior if value.has_prior else None, prior_confidence=( @@ -1218,8 +1259,8 @@ def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | ) elif isinstance(value, Float): classic_space[key] = neps.HPOFloat( - lower=value.min_value, - upper=value.max_value, + lower=value.lower, + upper=value.upper, log=value._log if hasattr(value, "_log") else False, prior=value.prior if value.has_prior else None, prior_confidence=( @@ -1229,8 +1270,8 @@ def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | elif isinstance(value, Fidelity): if isinstance(value._domain, Integer): classic_space[key] = neps.HPOInteger( - lower=value._domain.min_value, - upper=value._domain.max_value, + lower=value._domain.lower, + upper=value._domain.upper, log=( value._domain._log if hasattr(value._domain, "_log") @@ -1240,8 +1281,8 @@ def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | ) elif isinstance(value._domain, Float): classic_space[key] = neps.HPOFloat( - lower=value._domain.min_value, - upper=value._domain.max_value, + lower=value._domain.lower, + upper=value._domain.upper, log=( 
value._domain._log if hasattr(value._domain, "_log") @@ -1294,8 +1335,8 @@ class NEPSSpace(PipelineSpace): setattr(NEPSSpace, parameter_name, parameter.value) elif isinstance(parameter, neps.HPOInteger): new_integer = Integer( - min_value=parameter.lower, - max_value=parameter.upper, + lower=parameter.lower, + upper=parameter.upper, log=parameter.log, prior=parameter.prior if parameter.prior else _UNSET, prior_confidence=( @@ -1309,8 +1350,8 @@ class NEPSSpace(PipelineSpace): ) elif isinstance(parameter, neps.HPOFloat): new_float = Float( - min_value=parameter.lower, - max_value=parameter.upper, + lower=parameter.lower, + upper=parameter.upper, log=parameter.log, prior=parameter.prior if parameter.prior else _UNSET, prior_confidence=( diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 3a1f5c31f..eda6d10e5 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -11,7 +11,15 @@ import math import random from collections.abc import Callable, Mapping, Sequence -from typing import Any, Generic, Literal, Protocol, TypeVar, cast, runtime_checkable +from typing import ( + Any, + Generic, + Literal, + Protocol, + TypeVar, + cast, + runtime_checkable, +) T = TypeVar("T") @@ -128,22 +136,22 @@ def compare_domain_to(self, other: object) -> bool: return self._domain == other._domain @property - def min_value(self) -> int | float: + def lower(self) -> int | float: """Get the minimum value of the fidelity domain. Returns: The minimum value of the fidelity domain. """ - return self._domain.min_value + return self._domain.lower @property - def max_value(self) -> int | float: + def upper(self) -> int | float: """Get the maximum value of the fidelity domain. Returns: The maximum value of the fidelity domain. """ - return self._domain.max_value + return self._domain.upper def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the fidelity as a mapping. 
@@ -420,13 +428,13 @@ class Domain(Resolvable, abc.ABC, Generic[T]): @property @abc.abstractmethod - def min_value(self) -> T: + def lower(self) -> T: """Get the minimum value of the domain.""" raise NotImplementedError() @property @abc.abstractmethod - def max_value(self) -> T: + def upper(self) -> T: """Get the maximum value of the domain.""" raise NotImplementedError() @@ -524,8 +532,8 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Domain[T]: def _calculate_new_domain_bounds( number_type: type[int] | type[float], - min_value: int | float, - max_value: int | float, + lower: int | float, + upper: int | float, center: int | float, confidence: ConfidenceLevel, ) -> tuple[int, int] | tuple[float, float]: @@ -536,8 +544,8 @@ def _calculate_new_domain_bounds( Args: number_type: The type of numbers in the domain (int or float). - min_value: The minimum value of the domain. - max_value: The maximum value of the domain. + lower: The minimum value of the domain. + upper: The maximum value of the domain. center: The center value around which to calculate the new bounds. confidence: The confidence level for the new bounds. @@ -548,10 +556,9 @@ def _calculate_new_domain_bounds( ValueError: If the center value is not within the domain's range or if the number_type is not supported. """ - if center < min_value or center > max_value: + if center < lower or center > upper: raise ValueError( - f"Center value {center!r} must be within domain range [{min_value!r}," - f" {max_value!r}]" + f"Center value {center!r} must be within domain range [{lower!r}, {upper!r}]" ) # Determine a chunk size by splitting the domain range into a fixed number of chunks. @@ -559,7 +566,7 @@ def _calculate_new_domain_bounds( # around the given center (on each side). number_of_chunks = 10.0 - chunk_size = (max_value - min_value) / number_of_chunks + chunk_size = (upper - lower) / number_of_chunks # The numbers refer to how many segments to have on each side of the center. 
# TODO: [lum] we need to make sure that in the end the range does not just have the @@ -575,11 +582,11 @@ def _calculate_new_domain_bounds( if number_type is int: # In this case we need to use ceil/floor so that we end up with ints. - new_min = max(min_value, math.floor(center - interval_radius)) - new_max = min(max_value, math.ceil(center + interval_radius)) + new_min = max(lower, math.floor(center - interval_radius)) + new_max = min(upper, math.ceil(center + interval_radius)) elif number_type is float: - new_min = max(min_value, center - interval_radius) - new_max = min(max_value, center + interval_radius) + new_min = max(lower, center - interval_radius) + new_max = min(upper, center + interval_radius) else: raise ValueError(f"Unsupported number type {number_type!r}.") @@ -597,7 +604,12 @@ class Categorical(Domain[int], Generic[T]): def __init__( self, - choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T] | Resolvable, + choices: ( + tuple[T | Domain[T] | Resolvable | Any, ...] + | Sequence[T | Domain[T] | Resolvable | Any] + | Domain[T] + | Resolvable + ), prior: int | Domain[int] | _Unset = _UNSET, prior_confidence: ( ConfidenceLevel | Literal["low", "medium", "high"] | _Unset @@ -606,12 +618,16 @@ def __init__( """Initialize the Categorical domain with choices and optional prior. Args: - choices: A tuple of choices or a Domain of choices. + choices: A tuple or list of choices or a Domain of choices. prior: The index of the prior choice in the choices tuple. prior_confidence: The confidence level of the prior choice. """ - self._choices: tuple[T | Domain[T] | Resolvable | Any, ...] | Domain[T] + self._choices: ( + tuple[T | Domain[T] | Resolvable | Any, ...] 
+ | Sequence[T | Domain[T] | Resolvable | Any] + | Domain[T] + ) if isinstance(choices, Sequence): self._choices = tuple(choice for choice in choices) if any(isinstance(choice, tuple) for choice in self._choices) and any( @@ -658,13 +674,13 @@ def compare_domain_to(self, other: object) -> bool: if not isinstance(other, Categorical): return False return ( - self._prior == other._prior - and self._prior_confidence == other._prior_confidence + self._prior == other.prior + and self._prior_confidence == other.prior_confidence and self.choices == other.choices ) @property - def min_value(self) -> int: + def lower(self) -> int: """Get the minimum value of the categorical domain. Returns: @@ -674,7 +690,7 @@ def min_value(self) -> int: return 0 @property - def max_value(self) -> int: + def upper(self) -> int: """Get the maximum value of the categorical domain. Returns: @@ -692,7 +708,11 @@ def choices(self) -> tuple[T | Domain[T] | Resolvable, ...] | Domain[T]: A tuple of choices or a Domain of choices. """ - return self._choices + return ( + self._choices + if not isinstance(self._choices, Sequence) + else tuple(self._choices) + ) @property def has_prior(self) -> bool: @@ -780,8 +800,8 @@ def centered_around( tuple[int, int], _calculate_new_domain_bounds( number_type=int, - min_value=self.min_value, - max_value=self.max_value, + lower=self.lower, + upper=self.upper, center=center, confidence=confidence, ), @@ -798,8 +818,8 @@ class Float(Domain[float]): """A domain representing a continuous range of floating-point values. Attributes: - min_value: The minimum value of the domain. - max_value: The maximum value of the domain. + lower: The minimum value of the domain. + upper: The maximum value of the domain. log: Whether to sample values on a logarithmic scale. prior: The prior value for the domain, if any. prior_confidence: The confidence level of the prior value. 
@@ -807,8 +827,8 @@ class Float(Domain[float]): def __init__( self, - min_value: float, - max_value: float, + lower: float, + upper: float, log: bool = False, # noqa: FBT001, FBT002 prior: float | _Unset = _UNSET, prior_confidence: ( @@ -818,15 +838,15 @@ def __init__( """Initialize the Float domain with min and max values, and optional prior. Args: - min_value: The minimum value of the domain. - max_value: The maximum value of the domain. + lower: The minimum value of the domain. + upper: The maximum value of the domain. log: Whether to sample values on a logarithmic scale. prior: The prior value for the domain, if any. prior_confidence: The confidence level of the prior value. """ - self._min_value = min_value - self._max_value = max_value + self._lower = lower + self._upper = upper self._log = log self._prior = prior self._prior_confidence = ( @@ -841,7 +861,7 @@ def __init__( def __str__(self) -> str: """Get a string representation of the floating-point domain.""" - string = f"Float({self._min_value}, {self._max_value}" + string = f"Float({self._lower}, {self._upper}" if self._log: string += ", log" if self.has_prior: @@ -865,38 +885,48 @@ def compare_domain_to(self, other: object) -> bool: if not isinstance(other, Float): return False return ( - self._prior == other._prior - and self._prior_confidence == other._prior_confidence - and self.min_value == other.min_value - and self.max_value == other.max_value - and self._log == other._log + self._prior == other.prior + and self._prior_confidence == other.prior_confidence + and self.lower == other.lower + and self.upper == other.upper + and self._log == other.log ) @property - def min_value(self) -> float: + def lower(self) -> float: """Get the minimum value of the floating-point domain. Returns: The minimum value of the domain. Raises: - ValueError: If min_value is greater than max_value. + ValueError: If lower is greater than upper. 
""" - return self._min_value + return self._lower @property - def max_value(self) -> float: + def upper(self) -> float: """Get the maximum value of the floating-point domain. Returns: The maximum value of the domain. Raises: - ValueError: If min_value is greater than max_value. + ValueError: If lower is greater than upper. + + """ + return self._upper + + @property + def log(self) -> bool: + """Check if the floating-point domain uses logarithmic sampling. + + Returns: + True if values should be sampled on a logarithmic scale, False otherwise. """ - return self._max_value + return self._log @property def has_prior(self) -> bool: @@ -948,7 +978,7 @@ def range_compatibility_identifier(self) -> str: the domain is logarithmic. """ - return f"{self._min_value}_{self._max_value}_{self._log}" + return f"{self._lower}_{self._upper}_{self._log}" def sample(self) -> float: """Sample a random floating-point value from the domain. @@ -957,14 +987,14 @@ def sample(self) -> float: A randomly selected floating-point value within the domain's range. Raises: - ValueError: If min_value is greater than max_value. + ValueError: If lower is greater than upper. 
""" if self._log: - log_min = math.log(self._min_value) - log_max = math.log(self._max_value) + log_min = math.log(self._lower) + log_max = math.log(self._upper) return float(math.exp(random.uniform(log_min, log_max))) - return float(random.uniform(self._min_value, self._max_value)) + return float(random.uniform(self._lower, self._upper)) def centered_around( self, @@ -988,14 +1018,14 @@ def centered_around( """ new_min, new_max = _calculate_new_domain_bounds( number_type=float, - min_value=self.min_value, - max_value=self.max_value, + lower=self.lower, + upper=self.upper, center=center, confidence=confidence, ) return Float( - min_value=new_min, - max_value=new_max, + lower=new_min, + upper=new_max, log=self._log, prior=center, prior_confidence=confidence, @@ -1006,8 +1036,8 @@ class Integer(Domain[int]): """A domain representing a range of integer values. Attributes: - min_value: The minimum value of the domain. - max_value: The maximum value of the domain. + lower: The minimum value of the domain. + upper: The maximum value of the domain. log: Whether to sample values on a logarithmic scale. prior: The prior value for the domain, if any. prior_confidence: The confidence level of the prior value. @@ -1015,8 +1045,8 @@ class Integer(Domain[int]): def __init__( self, - min_value: int, - max_value: int, + lower: int, + upper: int, log: bool = False, # noqa: FBT001, FBT002 prior: float | int | _Unset = _UNSET, prior_confidence: ( @@ -1026,14 +1056,14 @@ def __init__( """Initialize the Integer domain with min and max values, and optional prior. Args: - min_value: The minimum value of the domain. - max_value: The maximum value of the domain. + lower: The minimum value of the domain. + upper: The maximum value of the domain. log: Whether to sample values on a logarithmic scale. prior: The prior value for the domain, if any. prior_confidence: The confidence level of the prior value. 
""" - self._min_value = min_value - self._max_value = max_value + self._lower = lower + self._upper = upper self._log = log self._prior = prior self._prior_confidence = ( @@ -1048,7 +1078,7 @@ def __init__( def __str__(self) -> str: """Get a string representation of the integer domain.""" - string = f"Integer({self._min_value}, {self._max_value}" + string = f"Integer({self._lower}, {self._upper}" if self._log: string += ", log" if self.has_prior: @@ -1072,38 +1102,48 @@ def compare_domain_to(self, other: object) -> bool: if not isinstance(other, Integer): return False return ( - self._prior == other._prior - and self._prior_confidence == other._prior_confidence - and self.min_value == other.min_value - and self.max_value == other.max_value - and self._log == other._log + self._prior == other.prior + and self._prior_confidence == other.prior_confidence + and self.lower == other.lower + and self.upper == other.upper + and self._log == other.log ) @property - def min_value(self) -> int: + def lower(self) -> int: """Get the minimum value of the integer domain. Returns: The minimum value of the domain. Raises: - ValueError: If min_value is greater than max_value. + ValueError: If lower is greater than upper. """ - return self._min_value + return self._lower @property - def max_value(self) -> int: + def upper(self) -> int: """Get the maximum value of the integer domain. Returns: The maximum value of the domain. Raises: - ValueError: If min_value is greater than max_value. + ValueError: If lower is greater than upper. + + """ + return self._upper + + @property + def log(self) -> bool: + """Check if the integer domain uses logarithmic sampling. + + Returns: + True if values should be sampled on a logarithmic scale, False otherwise. """ - return self._max_value + return self._log @property def has_prior(self) -> bool: @@ -1154,7 +1194,7 @@ def range_compatibility_identifier(self) -> str: the domain is logarithmic. 
""" - return f"{self._min_value}_{self._max_value}_{self._log}" + return f"{self._lower}_{self._upper}_{self._log}" def sample(self) -> int: """Sample a random integer value from the domain. @@ -1165,11 +1205,9 @@ def sample(self) -> int: """ if self._log: return int( - math.exp( - random.uniform(math.log(self._min_value), math.log(self._max_value)) - ) + math.exp(random.uniform(math.log(self._lower), math.log(self._upper))) ) - return int(random.randint(self._min_value, self._max_value)) + return int(random.randint(self._lower, self._upper)) def centered_around( self, @@ -1195,15 +1233,15 @@ def centered_around( tuple[int, int], _calculate_new_domain_bounds( number_type=int, - min_value=self.min_value, - max_value=self.max_value, + lower=self.lower, + upper=self.upper, center=center, confidence=confidence, ), ) return Integer( - min_value=new_min, - max_value=new_max, + lower=new_min, + upper=new_max, log=self._log, prior=center, prior_confidence=confidence, diff --git a/neps/space/neps_spaces/sampling.py b/neps/space/neps_spaces/sampling.py index fe6a86a47..0368e2b89 100644 --- a/neps/space/neps_spaces/sampling.py +++ b/neps/space/neps_spaces/sampling.py @@ -211,18 +211,16 @@ def __call__( # Sample an integer from a Gaussian distribution centered around the # prior, cut of the tails to ensure the value is within the domain's # range. 
Using the _prior_probability to determine the standard deviation - assert hasattr(domain_obj, "min_value") - assert hasattr(domain_obj, "max_value") + assert hasattr(domain_obj, "lower") + assert hasattr(domain_obj, "upper") assert hasattr(domain_obj, "prior") std_dev = 1 / ( - 10 - * _prior_probability - / (domain_obj.max_value - domain_obj.min_value) # type: ignore + 10 * _prior_probability / (domain_obj.upper - domain_obj.lower) # type: ignore ) - a = (domain_obj.min_value - domain_obj.prior) / std_dev # type: ignore - b = (domain_obj.max_value - domain_obj.prior) / std_dev # type: ignore + a = (domain_obj.lower - domain_obj.prior) / std_dev # type: ignore + b = (domain_obj.upper - domain_obj.prior) / std_dev # type: ignore sampled_value = stats.truncnorm.rvs( a=a, b=b, diff --git a/neps/space/parameters.py b/neps/space/parameters.py index a08d907c1..723b1fd38 100644 --- a/neps/space/parameters.py +++ b/neps/space/parameters.py @@ -97,10 +97,6 @@ def __post_init__(self) -> None: self.domain = Domain.floating(self.lower, self.upper, log=self.log) self.center = self.domain.cast_one(0.5, frm=Domain.unit_float()) - def validate(self, value: Any) -> bool: - """Validate if a value is within the bounds of the float parameter.""" - return isinstance(value, float | int) and self.lower <= value <= self.upper - @dataclass class HPOInteger: @@ -194,10 +190,6 @@ def __post_init__(self) -> None: self.domain = Domain.integer(self.lower, self.upper, log=self.log) self.center = self.domain.cast_one(0.5, frm=Domain.unit_float()) - def validate(self, value: Any) -> bool: - """Validate if a value is within the bounds of the parameter.""" - return isinstance(value, float | int) and self.lower <= value <= self.upper - @dataclass class HPOCategorical: @@ -260,10 +252,6 @@ def __post_init__(self) -> None: self.center = self.choices[0] self.domain = Domain.indices(len(self.choices), is_categorical=True) - def validate(self, value: Any) -> bool: - """Validate if a value is one of the 
choices of the categorical parameter.""" - return value in self.choices - @dataclass class HPOConstant: @@ -295,10 +283,6 @@ def center(self) -> Any: """ return self.value - def validate(self, value: Any) -> bool: - """Validate if a value is the same as the constant parameter's value.""" - return value == self.value - Parameter: TypeAlias = HPOFloat | HPOInteger | HPOCategorical """A type alias for all the parameter types. diff --git a/neps/state/neps_state.py b/neps/state/neps_state.py index c99c9d1e9..928b5e07b 100644 --- a/neps/state/neps_state.py +++ b/neps/state/neps_state.py @@ -288,8 +288,8 @@ def lock_and_set_new_worker_id(self, worker_id: str | None = None) -> str: ) if opt_state.worker_ids and worker_id in opt_state.worker_ids: raise NePSError( - f"Worker id '{worker_id}' already exists, \ - reserved worker ids: {opt_state.worker_ids}" + f"Worker id '{worker_id}' already exists, " + f" reserved worker ids: {opt_state.worker_ids}" ) if opt_state.worker_ids is None: opt_state.worker_ids = [] @@ -362,6 +362,11 @@ def lock_and_import_trials( report=trial.report, worker_id=worker_id, ) + # Log imported trial similar to normal evaluation + logger.info( + f"Imported trial {trial.id} with result: " + f"{trial.report.objective_to_minimize}." 
+ ) return trials def lock_and_report_trial_evaluation( diff --git a/neps/utils/trial_io.py b/neps/utils/trial_io.py index 8a9c948eb..b1e823a10 100644 --- a/neps/utils/trial_io.py +++ b/neps/utils/trial_io.py @@ -3,11 +3,11 @@ from __future__ import annotations from collections.abc import Mapping, Sequence, ValuesView -from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Any from neps.state.neps_state import TrialRepo +from neps.state.pipeline_eval import UserResultDict if TYPE_CHECKING: from neps.state.trial import Trial @@ -15,7 +15,7 @@ def load_trials_from_pickle( root_dir: Path | str, -) -> Sequence[tuple[Mapping[str, Any], dict]]: +) -> Sequence[tuple[Mapping[str, Any], UserResultDict]]: """Load trials from a pickle-based TrialRepo. Args: @@ -37,7 +37,7 @@ def load_trials_from_pickle( ) return [ - (trial.config, asdict(trial.report)) + (trial.config, UserResultDict(**trial.report.__annotations__)) for trial in trials if trial.report is not None ] diff --git a/neps/validation.py b/neps/validation.py index 8d64cbc9a..8a50a2a76 100644 --- a/neps/validation.py +++ b/neps/validation.py @@ -3,12 +3,16 @@ from __future__ import annotations from collections.abc import Mapping -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any +from typing_extensions import assert_never from neps.exceptions import TrialValidationError +from neps.space import SearchSpace +from neps.space.neps_spaces.parameters import PipelineSpace if TYPE_CHECKING: - from neps.space import SearchSpace + from neps.space.neps_spaces.parameters import Categorical, Float, Integer + from neps.space.parameters import HPOCategorical, HPOConstant, HPOFloat, HPOInteger from neps.state.pipeline_eval import UserResultDict @@ -18,28 +22,146 @@ def _validate_imported_result(result: UserResultDict) -> None: raise TrialValidationError(config=result, message="Missing objective_to_minimize") -def _validate_imported_config( - space: SearchSpace, config: 
Mapping[str, float] -) -> None | Exception: +def validate_parameter_value( + param: ( + HPOFloat + | HPOInteger + | HPOCategorical + | HPOConstant + | Float + | Integer + | Categorical + ), + value: Any, +) -> bool: + """Validate a parameter value against its parameter definition. + + Works with both SearchSpace parameters (HPOFloat, HPOInteger, HPOCategorical, + HPOConstant) and PipelineSpace parameters (Float, Integer, Categorical from + neps_spaces). + + Args: + param: The parameter definition (from either SearchSpace or PipelineSpace) + value: The value to validate + + Returns: + bool: True if the value is valid for the parameter, False otherwise + """ + # Import here to avoid circular dependencies + from neps.space.neps_spaces.parameters import ( + Categorical as NepsCategorical, + Float as NepsFloat, + Integer as NepsInteger, + ) + from neps.space.parameters import ( + HPOCategorical, + HPOConstant, + HPOFloat, + HPOInteger, + ) + + # Float parameters - both use .lower and .upper + if isinstance(param, HPOFloat | NepsFloat): + return isinstance(value, float | int) and param.lower <= value <= param.upper + + # Integer parameters - both use .lower and .upper + if isinstance(param, HPOInteger | NepsInteger): + return isinstance(value, int) and param.lower <= value <= param.upper + + # Categorical parameters - both use .choices + if isinstance(param, HPOCategorical): + choices = param.choices + return value in choices + if isinstance(param, NepsCategorical): + return ( + 0 + <= value + < (len(list(param.choices)) if isinstance(param.choices, tuple) else 1) + ) + + # Constant - SearchSpace only + if isinstance(param, HPOConstant): + return value == param.value + + # Exhaustiveness check - all cases should be covered + assert_never(param) + + +def _validate_imported_config( # noqa: C901, PLR0912 + space: SearchSpace | PipelineSpace, config: Mapping[str, float] +) -> None: """Validate a configuration against the search space. 
Args: - space (SearchSpace): The search space to validate against. - config (dict): The configuration to validate. + space: The search space to validate against. + config: The configuration to validate. Raises: - ValueError: If the configuration is not valid. + TrialValidationError: If the configuration is not valid. """ - all_params = {**space.searchables, **space.fidelities} - for key in space.searchables: - if key not in config: - raise TrialValidationError(config=config, message=f"Missing key: {key}") - - for key, param in all_params.items(): - if key in config and not param.validate(config[key]): - raise TrialValidationError( - config=config, - message=f"Invalid value for parameter: {key}", - ) - return None + if isinstance(space, SearchSpace): + all_params = {**space.searchables, **space.fidelities} + for key in space.searchables: + if key not in config: + raise TrialValidationError(config=config, message=f"Missing key: {key}") + + for key, param in all_params.items(): + if key in config and not validate_parameter_value(param, config[key]): + raise TrialValidationError( + config=config, + message=f"Invalid value for parameter: {key}", + ) + elif isinstance(space, PipelineSpace): + # For PipelineSpace, we need to check for the prefixed keys + # Import here to avoid circular import + from neps.space.neps_spaces.neps_space import ( + NepsCompatConverter, + construct_sampling_path, + ) + from neps.space.neps_spaces.parameters import Domain + + # Check that all expected parameter keys are present in the config + for param_name, param_obj in space.get_attrs().items(): + if isinstance(param_obj, Domain): + # Construct the expected sampling path + sampling_path = construct_sampling_path( + path_parts=["Resolvable", param_name], + domain_obj=param_obj, + ) + expected_key = f"{NepsCompatConverter._SAMPLING_PREFIX}{sampling_path}" + if expected_key not in config: + raise TrialValidationError( + config=config, message=f"Missing key: {expected_key}" + ) + + # Check that 
all expected fidelity keys are present in the config + for fidelity_name in space.fidelity_attrs: + expected_key = f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" + if expected_key not in config: + raise TrialValidationError( + config=config, message=f"Missing fidelity key: {expected_key}" + ) + + # Validate parameter values for PipelineSpace + # Note: PipelineSpace doesn't have a searchables attribute like SearchSpace + # We need to validate the attrs that are actual parameters + from neps.space.neps_spaces.parameters import Categorical, Float, Integer + + for param_name, param in space.get_attrs().items(): # type: ignore[unreachable] + if isinstance(param, Float | Integer | Categorical): # type: ignore[unreachable] + # Construct the expected sampling path and key + sampling_path = construct_sampling_path( # type: ignore[unreachable] + path_parts=["Resolvable", param_name], + domain_obj=param, + ) + expected_key = f"{NepsCompatConverter._SAMPLING_PREFIX}{sampling_path}" + + # Validate the value if the key is present in config + if expected_key in config and not validate_parameter_value( + param, config[expected_key] + ): + raise TrialValidationError( + config=config, + message=f"Invalid value for parameter: {expected_key}", + ) diff --git a/neps_examples/basic_usage/example_import_trials.py b/neps_examples/basic_usage/example_import_trials.py index 32d9b552b..25296e48d 100644 --- a/neps_examples/basic_usage/example_import_trials.py +++ b/neps_examples/basic_usage/example_import_trials.py @@ -8,8 +8,9 @@ import torch import argparse import neps.utils +from typing import Any -logging.basicConfig(level=logging.DEBUG) +logging.basicConfig(level=logging.INFO) seed = 42 random.seed(seed) @@ -18,184 +19,185 @@ if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) + def get_evaluate_pipeline_func(optimizer): match optimizer: case "primo": - def evaluate_pipeline(float1, float2, categorical, integer1, integer2): - objective_to_minimize = [ - float1 - 
0.3, - float2 - 3.6 - ] + + def evaluate_pipeline_MO(float1, float2, **kwargs): + objective_to_minimize = [float1 - 0.3, float2 - 3.6] return objective_to_minimize + + evaluate_pipeline = evaluate_pipeline_MO + case "ifbo": - def evaluate_pipeline(float1, float2, categorical, integer1, integer2): - objective_to_minimize = abs(float1) / abs(float( - np.sum([float1, float2, int(categorical), integer1, integer2])) + + def evaluate_pipeline_IFBO(float1, float2, categorical, integer1, integer2): + objective_to_minimize = abs(float1) / abs( + float(np.sum([float1, float2, int(categorical), integer1, integer2])) ) return objective_to_minimize + + evaluate_pipeline = evaluate_pipeline_IFBO + case _: - def evaluate_pipeline(float1, float2, categorical, integer1, integer2): + + def evaluate_pipeline_default( + float1, float2, categorical, integer1, integer2 + ): objective_to_minimize = -float( np.sum([float1, float2, int(categorical), integer1, integer2]) ) return objective_to_minimize + + evaluate_pipeline = evaluate_pipeline_default + return evaluate_pipeline -def get_evaluated_trials(optimizer): - # Each optimizer gets its own evaluated trials fixture - match optimizer: - case "asha": - return [ - ({ - "float1": 0.5417078469603526, - "float2": 3.3333333333333335, - "categorical": 1, - "integer1": 0, - "integer2": 1000, - }, UserResultDict(objective_to_minimize=-1011.5417078469603)), - ] - case "successive_halving": - return [ - ({ - "float1": 0.5417078469603526, - "float2": 3.3333333333333335, - "categorical": 1, - "integer1": 0, - "integer2": 1000, - }, UserResultDict(objective_to_minimize=-1011.5417078469603)), - ] - case "priorband": - return [ - ({ - "float1": 0.5417078469603526, - "float2": 3.3333333333333335, - "categorical": 1, - "integer1": 0, - "integer2": 1000, - }, UserResultDict(objective_to_minimize=-1011.5417078469603)), - ] - case "primo": - return [ - ({ - "float1": 0.5417078469603526, - "float2": 3.3333333333333335, - "categorical": 1, - "integer1": 0, - 
"integer2": 1000, - }, UserResultDict(objective_to_minimize=[0.5417078469603, 3.3333333333333335])), - ({ - "float1": 0.5417078469603526, - "float2": 3.6, - "categorical": 1, - "integer1": 0, - "integer2": 1000, - }, UserResultDict(objective_to_minimize=[0.2417078469603, 3.6])), - ] - case "ifbo": - return [ - ({ - "float1": 0.5417078469603526, - "float2": 3.3333333333333335, - "categorical": 1, - "integer1": 0, - "integer2": 1000, - }, UserResultDict(objective_to_minimize=0.5417078469603)), - ({ - "float1": 0.5417078469603526, - "float2": 3.6, - "categorical": 1, - "integer1": 0, - "integer2": 1000, - }, UserResultDict(objective_to_minimize=0.2417078469603)), - ] - case "hyperband": - return [ - ({ - "float1": 0.5417078469603526, - "float2": 3.3333333333333335, - "categorical": 1, - "integer1": 0, - "integer2": 1000, - }, UserResultDict(objective_to_minimize=-1011.5417078469603)), - ({ +def get_evaluated_trials(optimizer) -> list[tuple[dict[str, Any], UserResultDict]]: + # Common config used by multiple optimizers + classic_base_config = { + "float1": 0.5417078469603526, + "float2": 3.3333333333333335, + "categorical": 1, + "integer1": 0, + "integer2": 1000, + } + neps_base_config = { + "ENVIRONMENT__float2": 1, + "SAMPLING__Resolvable.categorical::categorical__2": 0, + "SAMPLING__Resolvable.float1::float__0_1_False": 0.5, + "SAMPLING__Resolvable.integer1::integer__0_1_False": 1, + "SAMPLING__Resolvable.integer2::integer__1_1000_True": 5, + } + base_result = UserResultDict(objective_to_minimize=-1011.5417078469603) + + # Mapping of optimizers to their evaluated trials + trials_map = { + "asha": [(classic_base_config, base_result)], + "successive_halving": [(classic_base_config, base_result)], + "priorband": [(classic_base_config, base_result)], + "primo": [ + ( + classic_base_config, + UserResultDict( + objective_to_minimize=[0.5417078469603, 3.3333333333333335] + ), + ), + ( + {**classic_base_config, "float2": 3.6}, + 
UserResultDict(objective_to_minimize=[0.2417078469603, 3.6]), + ), + ], + "ifbo": [ + (classic_base_config, UserResultDict(objective_to_minimize=0.5417078469603)), + ( + {**classic_base_config, "float2": 3.6}, + UserResultDict(objective_to_minimize=0.2417078469603), + ), + ], + "hyperband": [ + (classic_base_config, base_result), + ( + { "float1": 0.5417078469603526, "categorical": 1, "integer1": 0, "integer2": 800, - }, UserResultDict(objective_to_minimize=-1011.5417078469603)), - ] - case "bayesian_optimization": - return [ - ({ + }, + base_result, + ), + ], + "bayesian_optimization": [ + ( + { "float1": 0.5884444338738143, "float2": 3.3333333333333335, "categorical": 0, "integer1": 0, "integer2": 1000, - }, {"objective_to_minimize": -1011.5417078469603}), - ] - case "async_hb": - return [ - ({ - "float1": 0.5417078469603526, - "float2": 3.3333333333333335, - "categorical": 1, - "integer1": 0, - "integer2": 1000, - }, UserResultDict(objective_to_minimize=-1011.5417078469603)), - ] - - raise ValueError(f"Unknown optimizer: {optimizer}") + }, + {"objective_to_minimize": -1011.5417078469603}, + ), + ], + "async_hb": [(classic_base_config, base_result)], + "neps_hyperband": [(neps_base_config, base_result)], + "neps_priorband": [(neps_base_config, base_result)], + } + + if optimizer not in trials_map: + raise ValueError(f"Unknown optimizer: {optimizer}") + + return trials_map[optimizer] + def run_import_trials(optimizer): - pipeline_space = neps.SearchSpace( - dict( - float1=neps.Float(lower=0, upper=1), - float2=neps.Float(lower=1, upper=10, is_fidelity=True), - categorical=neps.Categorical(choices=[0, 1]), - integer1=neps.Integer(lower=0, upper=1), - integer2=neps.Integer(lower=1, upper=1000, log=True), - ) + class ExampleSpace(neps.PipelineSpace): + float1 = neps.Float(lower=0, upper=1) + float2 = neps.Fidelity(neps.Float(lower=1, upper=10)) + categorical = neps.Categorical(choices=[0, 1]) + integer1 = neps.Integer(lower=0, upper=1) + integer2 = 
neps.Integer(lower=1, upper=1000, log=True) + + + logging.info( + f"{'-'*80} Running initial evaluations for optimizer {optimizer}. {'-'*80}" ) - # here we write something + # here we write something neps.run( evaluate_pipeline=get_evaluate_pipeline_func(optimizer=optimizer), - pipeline_space=pipeline_space, - root_directory=f"initial_results_{optimizer}", + pipeline_space=ExampleSpace(), + root_directory=f"results/trial_import/initial_results_{optimizer}", + overwrite_root_directory=True, fidelities_to_spend=5, worker_id=f"worker_{optimizer}-{socket.gethostname()}-{os.getpid()}", - optimizer=optimizer + optimizer=optimizer, ) - trials = neps.utils.load_trials_from_pickle(root_dir=f"initial_results_{optimizer}") + trials = neps.utils.load_trials_from_pickle( + root_dir=f"results/trial_import/initial_results_{optimizer}" + ) + + logging.info( + f"{'-'*80} Importing {len(trials)} trials for optimizer {optimizer}. {'-'*80}" + ) # import trials been evaluated above neps.import_trials( - pipeline_space, + ExampleSpace(), evaluated_trials=trials, - root_directory=f"results_{optimizer}", - optimizer=optimizer + root_directory=f"results/trial_import/results_{optimizer}", + overwrite_root_directory=True, + optimizer=optimizer, ) - # imort some trials evaluated in some other setup + logging.info( + f"{'-'*80} Importing {len(get_evaluated_trials(optimizer))} trials for optimizer" + f" {optimizer}. {'-'*80}" + ) + + # import some trials evaluated in some other setup neps.import_trials( - pipeline_space, + ExampleSpace(), evaluated_trials=get_evaluated_trials(optimizer), - root_directory=f"results_{optimizer}", - optimizer=optimizer + root_directory=f"results/trial_import/results_{optimizer}", + optimizer=optimizer, ) + logging.info(f"{'-'*80} Running after import for optimizer {optimizer}. 
{'-'*80}") + neps.run( evaluate_pipeline=get_evaluate_pipeline_func(optimizer=optimizer), - pipeline_space=pipeline_space, - root_directory=f"results_{optimizer}", - fidelities_to_spend=20, + pipeline_space=ExampleSpace(), + root_directory=f"results/trial_import/results_{optimizer}", + fidelities_to_spend=10, worker_id=f"worker_{optimizer}_resume-{socket.gethostname()}-{os.getpid()}", - optimizer=optimizer + optimizer=optimizer, ) + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( @@ -203,10 +205,18 @@ def run_import_trials(optimizer): type=str, required=True, choices=[ - "asha", "successive_halving", "priorband", "primo", - "ifbo", "hyperband", "bayesian_optimization", "async_hb" + "asha", + "successive_halving", + "priorband", + "primo", + "ifbo", + "hyperband", + "bayesian_optimization", + "async_hb", + "neps_hyperband", + "neps_priorband", ], - help="Optimizer to test." + help="Optimizer to test.", ) args = parser.parse_args() print(f"Testing import_trials for optimizer: {args.optimizer}") diff --git a/neps_examples/basic_usage/hyperparameters.py b/neps_examples/basic_usage/hyperparameters.py index 8163e2f63..b61a24eba 100644 --- a/neps_examples/basic_usage/hyperparameters.py +++ b/neps_examples/basic_usage/hyperparameters.py @@ -3,11 +3,13 @@ import neps import socket import os + # This example demonstrates how to use NePS to optimize hyperparameters # of a pipeline. The pipeline is a simple function that takes in # five hyperparameters and returns their sum. # Neps uses the default optimizer to minimize this objective function. 
+ def evaluate_pipeline(float1, float2, categorical, integer1, integer2): objective_to_minimize = -float( np.sum([float1, float2, int(categorical), integer1, integer2]) @@ -16,11 +18,11 @@ def evaluate_pipeline(float1, float2, categorical, integer1, integer2): class HPOSpace(neps.PipelineSpace): - float1 = neps.Float(min_value=0, max_value=1) - float2 = neps.Float(min_value=-10, max_value=10) + float1 = neps.Float(lower=0, upper=1) + float2 = neps.Float(lower=-10, upper=10) categorical = neps.Categorical(choices=(0, 1)) - integer1 = neps.Integer(min_value=0, max_value=1) - integer2 = neps.Integer(min_value=1, max_value=1000, log=True) + integer1 = neps.Integer(lower=0, upper=1) + integer2 = neps.Integer(lower=1, upper=1000, log=True) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/convenience/logging_additional_info.py b/neps_examples/convenience/logging_additional_info.py index a511c9318..796b8db99 100644 --- a/neps_examples/convenience/logging_additional_info.py +++ b/neps_examples/convenience/logging_additional_info.py @@ -22,11 +22,11 @@ def evaluate_pipeline(float1, float2, categorical, integer1, integer2): class HPOSpace(neps.PipelineSpace): - float1 = neps.Float(min_value=0, max_value=1) - float2 = neps.Float(min_value=-10, max_value=10) + float1 = neps.Float(lower=0, upper=1) + float2 = neps.Float(lower=-10, upper=10) categorical = neps.Categorical(choices=(0, 1)) - integer1 = neps.Integer(min_value=0, max_value=1) - integer2 = neps.Integer(min_value=1, max_value=1000, log=True) + integer1 = neps.Integer(lower=0, upper=1) + integer2 = neps.Integer(lower=1, upper=1000, log=True) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/convenience/neps_tblogger_tutorial.py b/neps_examples/convenience/neps_tblogger_tutorial.py index 2d1e44ff3..952f82716 100644 --- a/neps_examples/convenience/neps_tblogger_tutorial.py +++ b/neps_examples/convenience/neps_tblogger_tutorial.py @@ -212,9 +212,9 @@ def training( def pipeline_space() -> 
neps.PipelineSpace: class HPOSpace(neps.PipelineSpace): - lr = neps.Float(min_value=1e-5, max_value=1e-1, log=True) + lr = neps.Float(lower=1e-5, upper=1e-1, log=True) optim = neps.Categorical(choices=("Adam", "SGD")) - weight_decay = neps.Float(min_value=1e-4, max_value=1e-1, log=True) + weight_decay = neps.Float(lower=1e-4, upper=1e-1, log=True) return HPOSpace() diff --git a/neps_examples/convenience/running_on_slurm_scripts.py b/neps_examples/convenience/running_on_slurm_scripts.py index a8c8658dc..c43ea01f6 100644 --- a/neps_examples/convenience/running_on_slurm_scripts.py +++ b/neps_examples/convenience/running_on_slurm_scripts.py @@ -52,7 +52,7 @@ def evaluate_pipeline_via_slurm( class HPOSpace(neps.PipelineSpace): optimizer = neps.Categorical(choices=("sgd", "adam")) - learning_rate = neps.Float(min_value=10e-7, max_value=10e-3, log=True) + learning_rate = neps.Float(lower=10e-7, upper=10e-3, log=True) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/convenience/working_directory_per_pipeline.py b/neps_examples/convenience/working_directory_per_pipeline.py index cbf510e4a..115094cd4 100644 --- a/neps_examples/convenience/working_directory_per_pipeline.py +++ b/neps_examples/convenience/working_directory_per_pipeline.py @@ -19,9 +19,9 @@ def evaluate_pipeline(pipeline_directory: Path, float1, categorical, integer1): class HPOSpace(neps.PipelineSpace): - float1 = neps.Float(min_value=0, max_value=1) + float1 = neps.Float(lower=0, upper=1) categorical = neps.Categorical(choices=(0, 1)) - integer1 = neps.Integer(min_value=0, max_value=1) + integer1 = neps.Integer(lower=0, upper=1) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/efficiency/expert_priors_for_hyperparameters.py b/neps_examples/efficiency/expert_priors_for_hyperparameters.py index f966d0731..38ea210ca 100644 --- a/neps_examples/efficiency/expert_priors_for_hyperparameters.py +++ b/neps_examples/efficiency/expert_priors_for_hyperparameters.py @@ -24,15 +24,15 @@ 
def evaluate_pipeline(some_float, some_integer, some_cat): # that speeds up the search class HPOSpace(neps.PipelineSpace): some_float = neps.Float( - min_value=1, - max_value=1000, + lower=1, + upper=1000, log=True, prior=900, prior_confidence="medium", ) some_integer = neps.Integer( - min_value=0, - max_value=50, + lower=0, + upper=50, prior=35, prior_confidence="low", ) diff --git a/neps_examples/efficiency/multi_fidelity.py b/neps_examples/efficiency/multi_fidelity.py index 1e5ff32a3..b3fb2781d 100644 --- a/neps_examples/efficiency/multi_fidelity.py +++ b/neps_examples/efficiency/multi_fidelity.py @@ -84,8 +84,8 @@ def evaluate_pipeline( class HPOSpace(neps.PipelineSpace): - learning_rate = neps.Float(min_value=1e-4, max_value=1e0, log=True) - epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=10)) + learning_rate = neps.Float(lower=1e-4, upper=1e0, log=True) + epoch = neps.Fidelity(neps.Integer(lower=1, upper=10)) logging.basicConfig(level=logging.INFO) @@ -95,5 +95,5 @@ class HPOSpace(neps.PipelineSpace): root_directory="results/multi_fidelity_example", # Optional: Do not start another evaluation after <=50 epochs, corresponds to cost # field above. 
- fidelities_to_spend=20 + fidelities_to_spend=20, ) diff --git a/neps_examples/efficiency/multi_fidelity_and_expert_priors.py b/neps_examples/efficiency/multi_fidelity_and_expert_priors.py index af9ce63c8..2f582803f 100644 --- a/neps_examples/efficiency/multi_fidelity_and_expert_priors.py +++ b/neps_examples/efficiency/multi_fidelity_and_expert_priors.py @@ -14,25 +14,25 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): class HPOSpace(neps.PipelineSpace): float1 = neps.Float( - min_value=1, - max_value=1000, + lower=1, + upper=1000, log=False, prior=600, prior_confidence="medium", ) float2 = neps.Float( - min_value=-10, - max_value=10, + lower=-10, + upper=10, prior=0, prior_confidence="medium", ) integer1 = neps.Integer( - min_value=0, - max_value=50, + lower=0, + upper=50, prior=35, prior_confidence="low", ) - fidelity = neps.Fidelity(neps.Integer(min_value=1, max_value=10)) + fidelity = neps.Fidelity(neps.Integer(lower=1, upper=10)) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/efficiency/pytorch_lightning_ddp.py b/neps_examples/efficiency/pytorch_lightning_ddp.py index cd3bb0896..99dd117fc 100644 --- a/neps_examples/efficiency/pytorch_lightning_ddp.py +++ b/neps_examples/efficiency/pytorch_lightning_ddp.py @@ -86,8 +86,8 @@ def evaluate_pipeline(lr=0.1, epoch=20): class HPOSpace(neps.PipelineSpace): - lr = neps.Float(min_value=0.001, max_value=0.1, log=True, prior=0.01) - epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=3)) + lr = neps.Float(lower=0.001, upper=0.1, log=True, prior=0.01) + epoch = neps.Fidelity(neps.Integer(lower=1, upper=3)) logging.basicConfig(level=logging.INFO) @@ -95,5 +95,5 @@ class HPOSpace(neps.PipelineSpace): evaluate_pipeline=evaluate_pipeline, pipeline_space=HPOSpace(), root_directory="results/pytorch_lightning_ddp", - fidelities_to_spend=5 - ) + fidelities_to_spend=5, +) diff --git a/neps_examples/efficiency/pytorch_lightning_fsdp.py b/neps_examples/efficiency/pytorch_lightning_fsdp.py index 
73835a733..14c7bb481 100644 --- a/neps_examples/efficiency/pytorch_lightning_fsdp.py +++ b/neps_examples/efficiency/pytorch_lightning_fsdp.py @@ -57,12 +57,12 @@ def evaluate_pipeline(lr=0.1, epoch=20): logging.basicConfig(level=logging.INFO) class HPOSpace(neps.PipelineSpace): - lr = neps.Float(min_value=0.001, max_value=0.1, log=True, prior=0.01) - epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=3)) + lr = neps.Float(lower=0.001, upper=0.1, log=True, prior=0.01) + epoch = neps.Fidelity(neps.Integer(lower=1, upper=3)) neps.run( evaluate_pipeline=evaluate_pipeline, pipeline_space=HPOSpace(), root_directory="results/pytorch_lightning_fsdp", - fidelities_to_spend=5 - ) + fidelities_to_spend=5, + ) diff --git a/neps_examples/efficiency/pytorch_native_ddp.py b/neps_examples/efficiency/pytorch_native_ddp.py index 79debab5a..9fc4741d5 100644 --- a/neps_examples/efficiency/pytorch_native_ddp.py +++ b/neps_examples/efficiency/pytorch_native_ddp.py @@ -107,13 +107,15 @@ def evaluate_pipeline(learning_rate, epochs): class HPOSpace(neps.PipelineSpace): - learning_rate = neps.Float(min_value=10e-7, max_value=10e-3, log=True) - epochs = neps.Integer(min_value=1, max_value=3) + learning_rate = neps.Float(lower=10e-7, upper=10e-3, log=True) + epochs = neps.Integer(lower=1, upper=3) if __name__ == "__main__": logging.basicConfig(level=logging.INFO) - neps.run(evaluate_pipeline=evaluate_pipeline, - pipeline_space=HPOSpace(), - root_directory="results/pytorch_ddp", - evaluations_to_spend=25) + neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=HPOSpace(), + root_directory="results/pytorch_ddp", + evaluations_to_spend=25, + ) diff --git a/neps_examples/efficiency/pytorch_native_fsdp.py b/neps_examples/efficiency/pytorch_native_fsdp.py index cfe6d3831..775219ed1 100644 --- a/neps_examples/efficiency/pytorch_native_fsdp.py +++ b/neps_examples/efficiency/pytorch_native_fsdp.py @@ -209,8 +209,8 @@ def evaluate_pipeline(lr=0.1, epoch=20): 
logging.basicConfig(level=logging.INFO) class HPOSpace(neps.PipelineSpace): - lr = neps.Float(min_value=0.0001, max_value=0.1, log=True, prior=0.01) - epoch = neps.Fidelity(neps.Integer(min_value=1, max_value=3)) + lr = neps.Float(lower=0.0001, upper=0.1, log=True, prior=0.01) + epoch = neps.Fidelity(neps.Integer(lower=1, upper=3)) neps.run( evaluate_pipeline=evaluate_pipeline, diff --git a/neps_examples/test_files/algo_comparisons.ipynb b/neps_examples/test_files/algo_comparisons.ipynb new file mode 100644 index 000000000..99234baaf --- /dev/null +++ b/neps_examples/test_files/algo_comparisons.ipynb @@ -0,0 +1,161 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f261ba3d", + "metadata": {}, + "source": [ + "## The new, NePS-based algorithms should perform at least as well as the old ones." + ] + }, + { + "cell_type": "markdown", + "id": "dfe93097", + "metadata": {}, + "source": [ + "Test on the Hartmann6 function with (and without) fidelity" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5f2a155", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "-0.3703557067629127\n", + "-1.7570641798509674\n", + "-3.322368011415477\n" + ] + } + ], + "source": [ + "import neps\n", + "from neps import algorithms\n", + "import matplotlib.pyplot as plt\n", + "from pprint import pprint\n", + "import numpy as np\n", + "\n", + "# Parameters for the Hartmann 6D function\n", + "alpha = np.array([1.0, 1.2, 3.0, 3.2])\n", + "A = np.array([\n", + " [10.0, 3.0, 17.0, 3.5, 1.7, 8.0],\n", + " [0.05, 10.0, 17.0, 0.1, 8.0, 14.0],\n", + " [3.0, 3.5, 1.7, 10.0, 17.0, 8.0],\n", + " [17.0, 8.0, 0.05, 10.0, 0.1, 14.0]\n", + "])\n", + "P = 1e-4 * np.array([\n", + " [1312, 1696, 5569, 124, 8283, 5886],\n", + " [2329, 4135, 8307, 3736, 1004, 9991],\n", + " [2348, 1451, 3522, 2883, 3047, 6650],\n", + " [4047, 8828, 8732, 5743, 1091, 381]\n", + "])\n", + "# P should be divided by 10000 to match the common constants (1e-4 
factor)\n", + "\n", + "def hartmann6(x1, x2, x3, x4, x5, x6):\n", + " \"\"\"x must be a 6-dimensional numpy array or list-like.\"\"\"\n", + " x = np.array([x1, x2, x3, x4, x5, x6])\n", + " r = A * (x - P)**2\n", + " return -np.sum(alpha * np.exp(-np.sum(r, axis=1)))\n", + "\n", + "def mf_hartmann6(x1, x2, x3, x4, x5, x6, fidelity=10):\n", + " \"\"\"Multi-fidelity Hartmann 6D function.\n", + " \n", + " fidelity: float in (1, 10), where 1 is the lowest fidelity and 10 is the highest.\n", + " The function value is scaled by (fidelity / 10) and noise is added inversely proportional to fidelity.\n", + " \"\"\"\n", + " if fidelity < 1.0 or fidelity > 10.0:\n", + " raise ValueError(\"Fidelity must be in the range [1, 10]\")\n", + " \n", + " base_value = hartmann6(x1, x2, x3, x4, x5, x6)\n", + " noise = np.random.normal(0, (10 - fidelity) / 10 * 0.1) # Noise decreases with higher fidelity\n", + " return {\"objective_to_minimize\" : base_value * (fidelity / 10) + noise,\n", + " \"cost\" : fidelity}\n", + "\n", + "global_optimum = [0.20168952, 0.15001069, 0.47687398, 0.2753324, 0.31165163, 0.65730053]\n", + "\n", + "print(mf_hartmann6(*global_optimum, fidelity=1)) # Should be approximately -3.32237\n", + "print(mf_hartmann6(*global_optimum, fidelity=5)) # Should be approximately -3.32237\n", + "print(mf_hartmann6(*global_optimum, fidelity=10)) # Should be approximately -3.32237\n" + ] + }, + { + "cell_type": "markdown", + "id": "c41bafb7", + "metadata": {}, + "source": [ + "Creating four search spaces: one without fidelity and three with, two of them with either good or bad priors." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f012aa3", + "metadata": {}, + "outputs": [], + "source": [ + "class HartmannSpaceFid(neps.PipelineSpace):\n", + " x1 = neps.Float(0, 1)\n", + " x2 = neps.Float(0, 1)\n", + " x3 = neps.Float(0, 1)\n", + " x4 = neps.Float(0, 1)\n", + " x5 = neps.Float(0, 1)\n", + " x6 = neps.Float(0, 1)\n", + " fidelity = neps.Fidelity(neps.Integer(1, 10))\n", + "\n", + "hartmann_space_fid = HartmannSpaceFid()\n", + "hartmann_space_base = hartmann_space_fid.remove(\"fidelity\")\n", + "\n", + "hartmann_space_fid_good_priors = hartmann_space_base\n", + "hartmann_space_fid_bad_priors = hartmann_space_base\n", + "for n, param in enumerate([\"x1\", \"x2\", \"x3\"]):\n", + " hartmann_space_fid_good_priors = hartmann_space_fid_good_priors.add_prior(param, global_optimum[n], \"medium\")\n", + " hartmann_space_fid_bad_priors = hartmann_space_fid_bad_priors.add_prior(param, 1, \"medium\")\n", + "\n", + "# print(\"Hartmann 6D space with fidelity:\")\n", + "# print(hartmann_space_fid)\n", + "# print(\"\\nHartmann 6D space without fidelity:\")\n", + "# print(hartmann_space)\n", + "# print(\"\\nHartmann 6D space with fidelity and good priors:\")\n", + "# print(hartmann_space_fid_good_priors)\n", + "# print(\"\\nHartmann 6D space with fidelity and bad priors:\")\n", + "# print(hartmann_space_fid_bad_priors)" + ] + }, + { + "cell_type": "markdown", + "id": "6f48abde", + "metadata": {}, + "source": [ + "We compare the following algorithms:\n", + "- Random Search (with and without priors and fidelity)\n", + "- HyperBand (with and without priors)\n", + "- PriorBand (with and without fidelity)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "neural-pipeline-search (3.13.1)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": 
"python", + "pygments_lexer": "ipython3", + "version": "3.13.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/neps_examples/test_files/algo_tests.ipynb b/neps_examples/test_files/algo_tests.ipynb new file mode 100644 index 000000000..d94a7ab39 --- /dev/null +++ b/neps_examples/test_files/algo_tests.ipynb @@ -0,0 +1,1403 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "4d423fb2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "2\n", + "2\n", + "2\n", + "2\n", + "2\n", + "2\n", + "2\n", + "2\n", + "2\n", + "3\n", + "3\n", + "3\n", + "4\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + 
"0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "0\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n", + "2\n", + "2\n", + "2\n", + "2\n", + "2\n", + "2\n", + "2\n", + "2\n", + "2\n", + "3\n", + "3\n", + "3\n", + "4\n", + "1\n", + "1\n", + "1\n", + "1\n", + "1\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAGdCAYAAACyzRGfAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAALGtJREFUeJzt3Ql8VNXd//EfEUhQNgElIkSwLoAIVUSI0qospkgRBW1VVKTUFVGgVo2KaC2G2iqKsrgguIAorWChLqWRRWvCKgouCMpjohBwgwBKgjLP63ue/53/TJgEkkxOSPJ5v17XZGYuMydnxjnfe5Z7a4VCoZABAAB4kuDrhQAAAITwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMCr2naQ2bt3r23atMkaNGhgtWrVquziAACAA6Bzlu7YscNatGhhCQkJVSt8KHi0atWqsosBAADKIDc311q2bFm1wod6PILCN2zYsLKLAwAADkB+fr7rPAja8SoVPoKhFgUPwgcAAFXLgUyZYMIpAADwivABAAC8InwAAACvDro5HwAOvuVzP/74o/3000+VXRQAlaxOnTp2yCGHlPt5CB8AilVYWGibN2+277//vrKLAuAgmUyqZbT169cv1/MQPgAUe8K/jRs3uqMcnTSobt26nPgPqOG9oF999ZV98cUXdvzxx5erB4TwAaDYXg8FEK3bP/TQQyu7OAAOAkcccYT9z//8j+3Zs6dc4YMJpwBKtL/TJAOoOWrFqfeTbxUAAOAV4QMAarCzzz7bRowYUeI+06dPt8aNG9vBUD5Nfh44cKA7A7aOwrdt21Yp5aoKWrdubQ8//LAdjJjzAaDUxi/4xOvrjex9Qqn2v+qqq+yZZ56xjIwMu/3228P3z5071y688EI3ca6yqCFXYxqr0VRjOmfOHLvgggvCtwPBxN+LLrrI/V2JiYlxKc/LL7/slk9GNlgq3/4CiS9Fy6f39a233rJ33nnHmjVrZo0aNarU8qFs6PkAUC0lJSXZX/7yF/vuu++sKps2bZpb7qyVR5MmTbLnnnvO/vznP8ft+Zs0aXJAFwKrLEXL9+mnn1q7du2sQ4cOlpycXKY5CDpnjSZTY/+TzisK4QNAtdSrVy/XOKmXoCRvv/22/eIXv
7B69eq5lT033XST7dq1K6on4L777rNLL73UDjvsMDv66KNt4sSJ4cfVi3LPPfdYSkqK641Q74SeI1403KG/Q2X79a9/bf3797dVq1YVu796Rm688cbwbfVgqIH++OOPww2K/o7//Oc/+wxr6PfPP//cRo4c6f5N0Yb9jTfecA2/zvHwq1/9yoWi0gzVqOcp8jlVbz//+c9doFI9qxfjkksusR07doT3KVq+Bx980JYsWeKeR7dFAfPKK6+0ww8/3K3M6tOnj61fv36fsvzzn/+09u3bu/cpJyfHvaaCnP6t/qZjjjnG7aPlpKpn3dexY0dbsWJFsX/n/t5//W2nnXaaC1B6Hy+77DLbunVr+PFFixa5v0V1e8opp7jPYY8ePdw+r732mqtvDTHp30Web0d/u95nbao39QKNHj26xF499bb9/ve/dytW9Jx6nffee2+f9+Opp56yNm3auABfUQgfAKolDVPcf//99uijj7rzEsSio2g1oppD8P7779uLL77owkhk4y1//etfrVOnTvbuu++6YZybb77ZFixY4B77xz/+YePHj7fHH3/cNXhqYE8++eQK+Zs++eQTe/PNN61r167F7nPWWWe5Bi2wePFi1zAF9y1fvtwtkzzjjDNiDnHoBFJ/+tOfXLCIDBdq+P72t7+5xlSNvxrvW265pdx/k94D1dn8+fPdpvKOGzcu5r4q39VXX22pqamubLodDLMpICg4ZGVluQb4vPPOc39nZPnVE6aG9YMPPrAjjzzS3a/37swzz3Tvbd++fe2KK65wYeTyyy93Ie9nP/uZu11co76/919lUHhVI6/HtEz1qquu2ud51PA/9thjbjgpNzfXfvOb37j5GjNnzrR//etf9u9//9t9liNpCKp27dq2bNkye+SRR+yhhx5yf19xLr744nCoWblypZ166qnWs2dP+/bbb8P7bNiwwf1NqtvVq1dbRWHOx8KSj4pKdE56PEsCIM40v0NHcmPGjLGpU6fu87h6RQYNGhQ+staJkyZMmOAa8MmTJ4eP/NQ4BXNHTjjhBPvvf//rGpzevXu7RlhHtOpp0dwEHQGffvrpJZZr+/btB3yGSPW4KEjpFPcFBQWu9yM9vfjvHh0RKxzp6F0N04cffuiOiBU+rrvuOvezS5cuMc/doiEOvVZwlB5JjeiUKVNcYywKaAop5aXhD/VMBEMravwzMzNt7NixMcuncuuEd0H51OArdOg9CQLVjBkzXE+RGns1uEH5NWylEBlJIeXaa691v999993ufVf9BP/utttuc2Fny5Yt+9SJ7O/9/93vfhf+/dhjj3Wfry5dutjOnTujPgPqgdHnTIYOHereYwUz/ZugR2vhwoWuPAH9jfocqufkxBNPtDVr1rjbCmhFKVQrpCh8BPOFFCZVR3//+9/tmmuuCfeMPfvss653pCLR8wGgWtPRro4QP/roo30e09GoGj41AsGWlpYWPrtrQI1PJN0Onk+N1A8//OAaCX3pa8KogkJJ1NDqqLLoFosaEz2msqpnQL0faqCLo7kQaqTVg6CJmerKV2DRbdHPYLiiNNToB8FDjjrqqKjhg7LS0EfknI7SPq/eB4WsyN6gpk2busY48j1XYNEQSlGR9zVv3tz9jOy5CO4rrkz7e//Vw9CvXz8XSvR3KtgGoaWkcqi+g+AR3Fe0DN26dYsaxtLnUmEs1nWY9PlR4FHdRH7e9TlXyAlo6Kmig4fQ8wGgWvvlL3/pAoWOJIt2d+vLWEe9seZoqLE4EDr6XLdunZtDoaGYG264wQ3TqJGPXKVR9MRtxx133AE9v46qg33VoGo+hHpDdKQc6znUGOlvVg+HjnAVNNSwqddk7dq1rlu/LMMlRf8WvU5J8wv0NxZ9PHIYpKTnrYjJoJpLEWtyauTrB4/Huq+4MpX0/qsXQZ89beqNUaOu0JGWlrbPZM6irxnvetFnXcEuckguEDk3R/OBfCB8AKj2NIdAwy9qvCNpzFvDEvsLAtnZ2
fvc1kTAyIZNR7fahg0bZm3btnVd4Hr+eAtOaa2j7eLo6PrJJ5904UPDFwoCCiRqFBVCgu79WNRDEI8rGKuhVVDS5N2gQauIOQR6H9TTsHTp0vCwyzfffOMCgSaX+lDc+6/wpbLo86eQIiVNXi0t/c1FP5fFXXNFn8W8vDzXS6TepsrGsAuAak/d6JrbofH2SBo/V0+A5i+oYVSX9SuvvLLPhFPNJ3jggQfckIdWusyePdvNqxAN22g+iXoVPvvsM3v++eddY6Tu63jQCgU1Gps2bXJH05pnoXknkeGnKPV2KFRpYmX37t3D9+noWysvSjq6VcOkCaVffvmlff3112Uut4ZBNHRwxx13uG59TZxUXcWbGlutTNGQh+Y1aHhBk0W1Kkn3V7SS3n/1ninMaaKoHtPcFE0+jRf1oowaNcoFrRdeeMG9TvC5LEpzUjQso3PIaPKqJr7qs3/nnXfGNRAdKMIHgBpBjXbRbmsNR6hBV6jQclvNj9CkQy2XjPSHP/zBfUHrcQ13aFWBus6DLmv1Mqg3Qc+n7vd58+a5sfV4GDJkiOsu1yoUDbecdNJJbrWCjmBLClsql3p7gkmNCh/q0djffA/Vkxomze8oz9i/5p2oIX711VddedQ4akVHRZ0LpXPnzm5uixpY9TjodYsb9oqnkt5/1Z/CicKqemHUA6JJnvGiVTjqAdMEV/W4KHgEE0eL0rCN6kQ9YPpMKcBqWbOWVgfzWnyqFarMU/3FkJ+f79Ysaza41iFX9JkZu+U8Uebnyk6J/SZX9tkdgXjYvXu3m4xW0ev9D3YH2xk/AVGIVLj0ffr0kr4XStN+0/MBAAC8InwAAACvWO0CACXQ/AfgYLMoxpLZqoSeDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAgAqmU1vPnTu3xH10xV1dd+NgKN/HH3/sLteuM1jqLJoofhm26q4iLphX3XGeDwCltzDD7+udk16q3dWQ64JsRRt8nRvhnHPOse+++85dkyO4HVBje+yxx5Z4jYyy2Lx5sx1++OHhBkunpn733XcPmoY9snwyZswYd/E5XbAsuDYMEE+EDwA1nhpZXYtCF+nSRcGuv/56d2G1nj17xuX5k5OT7WBWtHy6Cm3fvn3LdWXewsJCd0VXlKywhtYTwy4AarwjjzzSNcDqkbjpppvcz1WrVsXcV9fi1NVK//73v4fvUw+Grjwb0KXdExMT7fvvv99nWEPPLbpCru4vepVZXfVUz6WroupKpXv27CnVUI0ugBf5nPpdf9Ott97qrjSrv7Po1WUjy6ffV65c6a5uq9+DfdesWWM9evRwl4tX2dQztHPnzn3KMnbsWHdV4BNPPDE8LPHSSy+5qwbr33bp0sVdRXj58uV22mmnuZ6VPn362FdffVXs36meqkGDBrl613Mcf/zx7kq2gdtuu81dpfXQQw91PVejR4+Oqjf9DXqPnn76aXeZe73mDTfc4K7y+8ADD7g60WdAZS9aL5MnT3bl0+vquSPf91jWrl3r9tdr6GqxV1xxhX399ddR78eNN97o3qdmzZqFr45c0xA+ACAiWLz++uuWk5NjXbt2jbmPGiRdljw4vbUaxo8++sj1mmiuhCxevNg1smoMi1q2bJn7qUuva7jj5ZdfDj+2cOFC1+ugn88884y7HLu28tJzaRhl6dKlrrFVsFiwYEHMfVWmk046yf7whz+432+55RbbtWuXayQ1NKPQoEvEq/xqRCNlZma6XiQ99/z586OGce666y4X6GrXrm2XXXaZC0OPPPKIvfXWW7Zhwwa7++67iy2/wsSHH35or732mqtrBQI13IEGDRq4etI+ek5d4n78+PFRz6F61b/X+/vCCy/Y1KlTXe/OF1984d6vv/zlL66MqqOirz1w4EB77733XADSZehVhlg01KeApmC5YsUK91pbtmyx3/zmN/u8H3Xr1rX//ve/NmXKFKuJGHYBUC2p8Ss6X0FHurG0b
NnS/SwoKLC9e/e6xlkBozg6en388cfd70uWLHGNjY6eFUjatm3rfp511lkx/62O3kW9B0WHO9S4P/bYY3bIIYe451HjqAb96quvtvLo2LGjCwCiXgO9hp63d+/e++yrMikgqO6C8qkx16XUn332WRdiRM/Rr18/12jrCF/02FNPPRUeRgiui6MAExzhaz7NpZde6l7/zDPPdPcNHTq0xJClMKg6Vk+JtG7dOupxhYaAHtPrzZo1ywWcgN5X9XwoqLRv397N9VFQevXVVy0hIcH11OhvUfCLDJ4XX3yx/f73v3e/33fffS5YPfroozZp0qR9yqk6UTnvv//+8H16zVatWrneHvXOBO/BAw88YDUZ4QNAtaTGRUfIkXRUe/nll++zr46+1SgpfKhnQkf0GqLQ3I9YFCzUiGqoQEfNCiNB+FBD+s4770Q1fAdKPQ4KHgENv2i4o7wUPiLpebdu3XrA/15H+p06dQoHD1FwUIOuBjwIHyeffHLM+QuRrx+5b+R9JZVH74N6H9Rzcu6557rhnTPOOCP8+IsvvmgTJkxwvRsaCvrxxx/dHJ5ICiV6jyNfU3Wt4FFSOVJTU/e5XdzqFvWOKLzEmqSrsgXho3PnzlbTET4AVEtqKI877rio+9TFHovmYWj1SxAAFFI0/l9c+FDDqXCi4KFN+yp86MhZwxKabxDZOB6oOnXq7DPEowa+OGo4NVQUKdYckdI+b1lFhpPiXl+vHeu+ksqjORSff/6566VQz4MmAms+jObHZGVlueGQe++91/WuNGrUyPV6PPjgg8WWIXjNeNeLgk/QG1RU5Jygw4qpp5qEOR8AUISOiDWHozhqpDSB8pVXXrEPPvjAunfv7o7u1XOi4RgNDxTXwAQ9A8UNAZWGhnA0LyNSRZxzol27du6oXnM/ApqvEAxX+KC/dfDgwfb888/bww8/bE888YS7X71MWpVz5513unrXkIaCSrxkZ2fvc1v1Ecupp57qPg/qZVHwjdwIHNEIHwBqPHW15+XluUZLkymfe+4569+/f4n/RkMtmrioVRTqZldDrHkiM2bMKHa+h2hVhVZOBJMRt2/fXuZya3KjJjZqLsb69evdvA6ttog39SzoHChq/PX8GloYPny4W8kRDKNUJE1GVdDTxFQ17prPEwQAhQ3NCVFvh4Y2NPwyZ86cuL22Pg+at6E5G6rfYFguFvXGfPvtt25Oi3rAVJ433njDhgwZEpewWZ0QPgDUeDp6V7e4jlC1bPPaa691kwpLooChBqXostai9xWlyZxqINVDoiWp+ws5JdEwg1ZjaH6JVtfs2LHDrrzySos3rdpRI6qGVa9z0UUXuaEPTbD0Qb1F6enprndJAU89Uwobcv7559vIkSNdIFAQVE+I6iReNJyj19JrK+QpcGrCaix6P9UjpM+A5qZoeE5LajWkFzm3BGa1QkUHDCtZfn6+G7PT0UDRCUPxMH7BJ1G3u+X8X9ddWWSnxO8MiCUZ2fv/JikBPml1w8aNG918CB31AjWNhtfUi1JZp72vat8LpWm/iWIAAMArwgcAAPCKpbYAAMRwkM1KqFbo+QAAAF4RPgAAgFeEDwAlousZQLy/DwgfAGIKTj0dXBYeAAoLC93PyGsQlQUTTgHEpC8XnRwpuNCWTjQVXJcDQM2zd+9edzFFfRfoZHnlQfgAUKzgkuqluQIqgOorISHBUlJSyn0gQvgAUCx9wei047oeSayrpQKoWerWrRuXU8UTPgAc0BBMecd4ASBQqvhyzz33uCOhyK1t27ZR53zXVf2aNm3qrvI4cOBAd9VGAACAQKn7Tk466STbvHlzeHv77bfDj+nKgvPmzXOXIF68eLFt2rTJBgwYUNqXAAAA1Viph100wzWYhBZJV7GbOnWqzZw503r06OHumzZtmrVr186ys7OtW7du8SkxAACoWT0f69evtxYtWtixxx5rgwYNspycHHf/ypUr3YS0Xr16hffVkIxmxWZlZcW31AAAoGb0f
HTt2tWmT59uJ554ohtyuffee+0Xv/iFrV271vLy8twsWJ0XIFLz5s3dY8UpKChwWyA/P78sfwcAAKiO4aNPnz7h3zt27OjCyDHHHGMvvfSS1atXr0wFyMjIcCEGAADUDOVarKtejhNOOME2bNjg5oHotKvbtm2L2kerXWLNEQmkp6e7+SLBlpubW54iAQCA6hw+du7caZ9++qk7CVHnzp3dtSAyMzPDj69bt87NCUlNTS32ORITE61hw4ZRGwAAqL5KNexyyy23WL9+/dxQi5bRjhkzxp146NJLL7VGjRrZ0KFDbdSoUdakSRMXIoYPH+6CBytdAABAmcLHF1984YLGN998Y0cccYR1797dLaPV7zJ+/Hh32lWdXEyTSNPS0mzSpEmleQkAAFDNlSp8zJo1q8THk5KSbOLEiW4DAACIpfxXhwEAACgFwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAAKpO+Bg3bpzVqlXLRowYEb5v9+7dNmzYMGvatKnVr1/fBg4caFu2bIlHWQEAQE0OH8uXL7fHH3/cOnbsGHX/yJEjbd68eTZ79mxbvHixbdq0yQYMGBCPsgIAgJoaPnbu3GmDBg2yJ5980g4//PDw/du3b7epU6faQw89ZD169LDOnTvbtGnT7J133rHs7Ox4lhsAANSk8KFhlb59+1qvXr2i7l+5cqXt2bMn6v62bdtaSkqKZWVlxXyugoICy8/Pj9oAAED1Vbu0/2DWrFm2atUqN+xSVF5entWtW9caN24cdX/z5s3dY7FkZGTYvffeW9piAACAmtDzkZubazfffLPNmDHDkpKS4lKA9PR0N1wTbHoNAABQfZUqfGhYZevWrXbqqada7dq13aZJpRMmTHC/q4ejsLDQtm3bFvXvtNolOTk55nMmJiZaw4YNozYAAFB9lWrYpWfPnrZmzZqo+4YMGeLmddx2223WqlUrq1OnjmVmZroltrJu3TrLycmx1NTU+JYcAABU//DRoEED69ChQ9R9hx12mDunR3D/0KFDbdSoUdakSRPXizF8+HAXPLp16xbfkgMAgJox4XR/xo8fbwkJCa7nQytZ0tLSbNKkSfF+GQAAUFPDx6JFi6JuayLqxIkT3QYAAFAU13YBAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABeET4AAIBXhA8AAHDwho/Jkydbx44drWHDhm5LTU211157Lfz47t27bdiwYda0aVOrX7++DRw40LZs2VIR5QYAADUhfLRs2dLGjRtnK1eutBUrVliPHj2sf//+9sEHH7jHR44cafPmzbPZs2fb4sWLb
dOmTTZgwICKKjsAAKiCapdm5379+kXdHjt2rOsNyc7OdsFk6tSpNnPmTBdKZNq0adauXTv3eLdu3eJbcgAAULPmfPz00082a9Ys27Vrlxt+UW/Inj17rFevXuF92rZtaykpKZaVlVXs8xQUFFh+fn7UBgAAqq9Sh481a9a4+RyJiYl23XXX2Zw5c6x9+/aWl5dndevWtcaNG0ft37x5c/dYcTIyMqxRo0bhrVWrVmX7SwAAQPUMHyeeeKKtXr3ali5datdff70NHjzYPvzwwzIXID093bZv3x7ecnNzy/xcAACgms35EPVuHHfcce73zp072/Lly+2RRx6x3/72t1ZYWGjbtm2L6v3Qapfk5ORin089KNoAAEDNUO7zfOzdu9fN21AQqVOnjmVmZoYfW7duneXk5Lg5IQAAAKXu+dAQSZ8+fdwk0h07driVLYsWLbI33njDzdcYOnSojRo1ypo0aeLOAzJ8+HAXPFjpAgAAyhQ+tm7daldeeaVt3rzZhQ2dcEzBo3fv3u7x8ePHW0JCgju5mHpD0tLSbNKkSaV5CQAAUM2VKnzoPB4lSUpKsokTJ7oNAAAgFq7tAgAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADg4A0fGRkZ1qVLF2vQoIEdeeSRdsEFF9i6deui9tm9e7cNGzbMmjZtavXr17eBAwfali1b4l1uAABQE8LH4sWLXbDIzs62BQsW2J49e+zcc8+1Xbt2hfcZOXKkzZs3z2bPnu3237Rpkw0YMKAiyg4AAKqg2qXZ+fXXX4+6PX36dNcDsnLlSvvlL39p27dvt6lTp9rMmTOtR48ebp9p06ZZu3btXGDp1q1bfEsPAABq1pwPhQ1p0qSJ+6kQot6QXr16hfdp27atpaSkWFZWVsznKCgosPz8/KgNAABUX2UOH3v37rURI0bYmWeeaR06dHD35eXlWd26da1x48ZR+zZv3tw9Vtw8kkaNGoW3Vq1albVIAACgOocPzf1Yu3atzZo1q1wFSE9Pdz0owZabm1uu5wMAANVozkfgxhtvtPnz59uSJUusZcuW4fuTk5OtsLDQtm3bFtX7odUueiyWxMREtwEAgJqhVD0foVDIBY85c+bYm2++aW3atIl6vHPnzlanTh3LzMwM36eluDk5OZaamhq/UgMAgJrR86GhFq1keeWVV9y5PoJ5HJqrUa9ePfdz6NChNmrUKDcJtWHDhjZ8+HAXPFjpAgAASh0+Jk+e7H6effbZUfdrOe1VV13lfh8/frwlJCS4k4tpJUtaWppNmjSJ2gYAAKUPHxp22Z+kpCSbOHGi2wAAAIri2i4AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwA
QAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAADi4w8eSJUusX79+1qJFC6tVq5bNnTs36vFQKGR33323HXXUUVavXj3r1auXrV+/Pp5lBgAANSl87Nq1yzp16mQTJ06M+fgDDzxgEyZMsClTptjSpUvtsMMOs7S0NNu9e3c8ygsAAKq42qX9B3369HFbLOr1ePjhh+2uu+6y/v37u/ueffZZa968ueshueSSS8pfYgAAUKXFdc7Hxo0bLS8vzw21BBo1amRdu3a1rKyseL4UAACoKT0fJVHwEPV0RNLt4LGiCgoK3BbIz8+PZ5EAAMBBptJXu2RkZLjekWBr1apVZRcJAABUlfCRnJzsfm7ZsiXqft0OHisqPT3dtm/fHt5yc3PjWSQAAFCdw0ebNm1cyMjMzIwaRtGql9TU1Jj/JjEx0Ro2bBi1AQCA6qvUcz527txpGzZsiJpkunr1amvSpImlpKTYiBEj7M9//rMdf/zxLoyMHj3anRPkggsuiHfZAQBATQgfK1assHPOOSd8e9SoUe7n4MGDbfr06Xbrrbe6c4Fcc801tm3bNuvevbu9/vrrlpSUFN+SAwCAmhE+zj77bHc+j+LorKd/+tOf3AYAAHDQrXYBAAA1C+EDAAB4RfgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV7X9vhzKYvyCT6yqGdn7hMouAgDgIEXPBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8YqltFdMt54ky/9vslGviWhYAAMqCng8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABecW0XINLCjLL9u3PS410SAKi26PkAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF6x2gX4f8Yv+MS65XxTpn+b/eMnVhlG9j6hUl4XQPR3R1UzspK/O+j5AAAA1SN8TJw40Vq3bm1JSUnWtWtXW7ZsWUW9FAAAqOnh48UXX7RRo0bZmDFjbNWqVdapUydLS0uzrVu3VsTLAQCAmh4+HnroIbv66qttyJAh1r59e5syZYodeuih9vTTT1fEywEAgJo84bSwsNBWrlxp6en//3TTCQkJ1qtXL8vKytpn/4KCArcFtm/f7n7m5+dbRdi9a2fU7V0/FMTtuXyoKuWtqPevIql+ylq/lfFZqKr1DFQ3lfX//8H23RE8ZygU2v/OoTj78ssv9aqhd955J+r+P/7xj6HTTz99n/3HjBnj9mdjY2NjY2OzKr/l5ubuNytU+lJb9ZBofkhg79699u2331rTpk2tVq1a5UpgrVq1stzcXGvYsGGcSotYqGu/qG9/qGt/qOuqX9fq8dixY4e1aNFiv/vGPXw0a9bMDjnkENuyZUvU/bqdnJy8z/6JiYlui9S4ceO4lUcVywfZD+raL+rbH+raH+q6atd1o0aNKmfCad26da1z586WmZkZ1Zuh26mpqfF+OQAAUMVUyLCLhlEGDx5sp512mp1++un28MMP265du9zqFwAAULNVSPj47W9/a1999ZXdfffdlpeXZz//+c/t9ddft+bNm5svGsrReUaKDukg/qhrv6hvf6hrf6jrmlXXtTTrtNJeHQAA1Dhc2wUAAHhF+AAAAF4RPgAAgFeEDwAA4FW1DR8TJ0601q1bW1JSknXt2tWWLVtW2UWq8jIyMqxLly7WoEEDO/LII+2CCy6wdevWRe2ze/duGzZsmDtDbf369W3gwIH7nHAOpTNu3Dh3tt8RI0aE76Oe4+vLL7+0yy+/3NVnvXr17OSTT7YVK1aEH9e8fK3eO+qoo9zju
lbV+vXrK7XMVdFPP/1ko0ePtjZt2rh6/NnPfmb33Xdf1LVAqOuyWbJkifXr18+dXVTfF3Pnzo16/EDqVWcXHzRokDvxmE72OXToUNu5s4KuWxOqhmbNmhWqW7du6Omnnw598MEHoauvvjrUuHHj0JYtWyq7aFVaWlpaaNq0aaG1a9eGVq9eHTrvvPNCKSkpoZ07d4b3ue6660KtWrUKZWZmhlasWBHq1q1b6IwzzqjUcldly5YtC7Vu3TrUsWPH0M033xy+n3qOn2+//TZ0zDHHhK666qrQ0qVLQ5999lnojTfeCG3YsCG8z7hx40KNGjUKzZ07N/Tee++Fzj///FCbNm1CP/zwQ6WWvaoZO3ZsqGnTpqH58+eHNm7cGJo9e3aofv36oUceeSS8D3VdNq+++mrozjvvDL388svu+ipz5syJevxA6vVXv/pVqFOnTqHs7OzQW2+9FTruuONCl156aagiVMvwoQvYDRs2LHz7p59+CrVo0SKUkZFRqeWqbrZu3eo+5IsXL3a3t23bFqpTp477Qgl89NFHbp+srKxKLGnVtGPHjtDxxx8fWrBgQeiss84Khw/qOb5uu+22UPfu3Yt9fO/evaHk5OTQX//61/B9eg8SExNDL7zwgqdSVg99+/YN/e53v4u6b8CAAaFBgwa536nr+CgaPg6kXj/88EP375YvXx7e57XXXgvVqlXLXTA23qrdsEthYaGtXLnSdSkFEhIS3O2srKxKLVt1s337dvezSZMm7qfqfc+ePVF137ZtW0tJSaHuy0DDKn379o2qT6Ge4+uf//ynOxvzxRdf7IYTTznlFHvyySfDj2/cuNGdLDGyvnX9Cg3nUt+lc8YZZ7hLbXzyySfu9nvvvWdvv/229enTx92mrivGgdSrfmqoRf8vBLS/2s+lS5fGvUyVflXbePv666/duGLRs6nq9scff1xp5apudL0ezUE488wzrUOHDu4+fbh1bZ+iFwZU3esxHLhZs2bZqlWrbPny5fs8Rj3H12effWaTJ092l4W44447XJ3fdNNNro51mYigTmN9p1DfpXP77be7K6oqLOsCpPquHjt2rJtnINR1xTiQetVPhe9ItWvXdgeXFVH31S58wN9R+dq1a91RC+JLl7m++eabbcGCBW7CNCo+SOto7/7773e31fOhz/aUKVNc+ED8vPTSSzZjxgybOXOmnXTSSbZ69Wp3EKNJktR1zVLthl2aNWvmEnXRmf+6nZycXGnlqk5uvPFGmz9/vi1cuNBatmwZvl/1q2Gvbdu2Re1P3ZeOhlW2bt1qp556qjvy0LZ48WKbMGGC+11HK9Rz/Gj2f/v27aPua9euneXk5LjfgzrlO6X8/vjHP7rej0suucStKLriiits5MiRbiWdUNcV40DqVT/1vRPpxx9/dCtgKqLuq134UFdp586d3bhi5JGNbqemplZq2ao6zWNS8JgzZ469+eabbrlcJNV7nTp1oupeS3H1JU7dH7iePXvamjVr3FFhsOnIXF3Twe/Uc/xo6LDoknHNSTjmmGPc7/qc68s3sr41dKBxcOq7dL7//ns3hyCSDhb1HS3UdcU4kHrVTx3Q6OAnoO95vTeaGxJ3oWq61FazeKdPn+5m8F5zzTVuqW1eXl5lF61Ku/76691SrUWLFoU2b94c3r7//vuoJaBafvvmm2+6JaCpqaluQ/lErnYR6jm+y5lr167tloGuX78+NGPGjNChhx4aev7556OWKeo75JVXXgm9//77of79+7P8swwGDx4cOvroo8NLbbUstFmzZqFbb701vA91XfbVce+++67b1LQ/9NBD7vfPP//8gOtVS21POeUUt+T87bffdqvtWGpbSo8++qj7ctb5PrT0VuuWUT76QMfadO6PgD7IN9xwQ+jwww93X+AXXnihCyiIb/ignuNr3rx5oQ4dOriDlrZt24aeeOKJqMe1VHH06NGh5s2bu3169uwZWrduXaWVt6rKz893n2N9NyclJYWOP
fZYd26KgoKC8D7UddksXLgw5vezAt+B1us333zjwobOvdKwYcPQkCFDXKipCLX0n/j3pwAAANSQOR8AAODgRvgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAABgPv0v6qomIQJ+6yEAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import neps\n", + "from neps import algorithms\n", + "from functools import partial\n", + "import matplotlib.pyplot as plt\n", + "global_values = []\n", + "eta=3\n", + "for algo in [partial(algorithms.neps_hyperband, eta=eta), \n", + " partial(algorithms.hyperband, eta=eta),]:\n", + " # partial(algorithms.neps_hyperband, sampler=\"prior\", eta=eta)]: \n", + " # partial(algorithms.hyperband, sampler=\"prior\", eta=eta)]:\n", + " neps.run(\n", + " evaluate_pipeline,\n", + " SimpleSpace(),\n", + " root_directory=\"neps_test_runs/algo_tests3\",\n", + " overwrite_root_directory=True,\n", + " optimizer=algo,\n", + " fidelities_to_spend=473\n", + " )\n" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "73b9e3d4", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAALaNJREFUeJzt3Q2czXXe//HPCEPuby4mi6jsIrohMbi2LVrJ1bLZWq1aydIWFbZVtuhqS0pCSpQV2kjZjeIqXXaIbOO+bCJ05YqtjNpi3GQov//j/X1cv/M/Z+6McY75npnX8/E4zZwb53zne07n+/59b37flCAIAgMAAPBIuZIuAAAAQG4EFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAd8pbEjp+/Lh9/vnnVq1aNUtJSSnp4gAAgCLQuWEPHDhgDRo0sHLlypW+gKJw0qhRo5IuBgAAKIbdu3dbw4YNS19AUc9J+AdWr169pIsDAACKIDs723UwhO14qQso4bCOwgkBBQCA5FKU6RlMkgUAAN4hoAAAAO8QUAAAgHeScg4KAP+WDn733Xf2/fffl3RRAJSgM844w8qXLx+XU4AQUACckqNHj9oXX3xhhw8fLumiAPDAmWeeaWeddZZVrFjxlJ6HgALglE6auHPnTnfUpBMv6QuJkycCZbcn9ejRo/bll1+674VmzZqd8GRshSGgACg2fRkppOi8BjpqAlC2Va5c2SpUqGCffvqp+36oVKlSsZ+LSbIATtmpHCUBKF3Kxen7gG8VAADgHQIKAKBQP/nJT2zo0KGFPmbWrFlWs2ZN86F8mrDdu3dvd6ZxzYnat29fiZQrGTRp0sQmTZpkPmIOCoCEmLh0+2l7rWFX/vCk/83NN99ss2fPtrFjx9q9994buX3hwoX285//3E34Kylq7NXg5tewqsFdsGCB9erVK3I9FE5W/sUvfuH+rtTU1LiU59VXX3XzCqIbNZXvRKHldMldPr2v77zzjr377rtWt25dq1GjRomWD8VDDwqAMksT+B577DH75ptvLJnNnDnTLfXWyolnnnnG/vznP9vDDz8ct+evXbt2kTZ3Kym5y/c///M/
1qJFC2vVqpWlpaUVa2WZzumjCeAonCbCJgoBBUCZ1bVrV9eAqbehMKtWrbJ///d/dysUtGLpzjvvtEOHDsX0KDz00EN2ww03WJUqVewHP/iBTZkyJXK/emP+8z//0xo3bux6NdTLoeeIFw2t6O9Q2f7jP/7DevbsaRs3bizw8ephGTJkSOS6ekLUiH/00UeRRkd/x9/+9rc8Qyj6XSs0hg0b5v5N7sb/rbfecuGgatWqdtVVV7ngdDLDQurBin5O1dtFF13kQpfqWb0hffr0sQMHDkQek7t8TzzxhK1cudI9j66LQuivf/1rq1Wrlltx1r17d9uxY0eesrz++uvWsmVL9z7t2rXLvabCnv6t/qazzz7bPUZLaVXPuu2CCy6w9evXF/h3nuj91992ySWXuJCl9/FXv/qV7d27N3L/22+/7f4W1e3FF1/sPodXXHGFe8ybb77p6lvDWfp30ecj0t+u91kX1Zt6k0aNGlVo76B67X7zm9/Yv/3bv7nn1Ots2rQpz/vxpz/9yZo2bXpKq3ROhIACoMzSkMgjjzxiTz31lP3zn//M9zE6GldDqzkN//jHP+zll192gSW6gZfHH3/cLrzwQnvvvffckNFdd91lS5cudff99a9/tYkTJ9qzzz7rGkU1wq1bt07I37R9+3ZbtmyZtW/fvsDHXHbZZa7RC61YscI1XuFt69ats2PHjlnHjh3zHU5p2LCh/fGPf3ThIzqAqHEcP368a3AVENTA33333af8N+k9UJ0tXrzYXVTeRx99NN/HqnwDBw609PR0VzZdD4f0FCIULjIzM10jffXVV7u/M7r86lFT4/vhhx9avXr13O167zp16uTe2x49ethNN93kAsuNN97oguC5557rrhfU8J/o/VcZFHAVBHTf//7v/7ry5qZw8PTTT7uhq927d9v111/v5o/MnTvX/uu//sv++7//232Wo2m4S2d2Xbt2rT355JM2YcIE9/cV5LrrrosEnw0bNlibNm2sS5cu9vXXX0ce8/HHH7u/SXX7/vvvW6IwByUfmTNO/X+o0y19wPiSLgKQlDTfREeEDzzwgM2YMSPP/epd6du3b+QIXSefmjx5smvkp06dGjmCVAMWzmX54Q9/aH//+99do3TllVe6hlpHxuqx0VwJHUlfeumlhZZr//797ui8KNRzo7Cl7QZycnJcL8rIkSMLfLyOrBWg1AugxmvLli3uyFoB5be//a372a5du3zPbaPhFL1WeLQfTQ3ttGnTXIMtCnEKMqdKQy3q4QiHcRQQMjIybMyYMfmWT+XWSQPD8ikUKJjoPQlD15w5c1yPkwKBGuWw/BoiU9CMpiBz6623ut9Hjx7t3nfVT/jv7rnnHheIsrKy8tSJnOj9v+WWWyK/n3POOe7z1a5dOzt48GDMZ0A9OfqcyYABA9x7rPCmfxP2jC1fvtyVJ6S/UZ9D9cD86Ec/sg8++MBdV4jLTcFbQUYBJZy/pMCpOvrLX/5igwYNivSwvfDCC66XJZHoQQFQ5umoWUeaW7duzXOfjmrVOKqhCC/dunWLnEU3pAYqmq6Hz6eG7Ntvv3UNiRoGTXJVmCiMGmMdnea+5EcNju5TWdXDoF4UNeIF0dwMNeTqidBkUg0bKNTouuhnODRyMhQMwnAiOt159FBFcWmYJXqOyck+r94HBbHoXqU6deq4Bjv6PVeo0XBNbtG31a9f3/2M7gEJbyuoTCd6/9VTcc0117jgor9T4TcMNoWVQ/UdhpPwttxl6NChQ8yQmT6XCmz57Zulz49Ckeom+vOuz7mCUEjDXIkOJ0IPCoAy78c//rELHToizd21ri9sHT3nN2dEDUpR6Ch227Ztbk6Hhn1uv/12NySkIBC9+iT3ya7OO++8Ij2/js7Dx6rR1fwM9aroiDu/51CDpb9ZPSU6UlYYUeOn3pfNmze7IYTiDM3k/lv0OoXNd9DfmPv+6CGXwp43ERNYNbcjvwm10a8f3p/fbQWVqbD3X70R+uzp
ol4dNfwKJt26dcszATX3a8a7XvRZV/iLHv4LRc8V0vyk04GAAgBmbk6DhnrUwEfTGLyGQE4UFlavXp3nuiYvRjd+OkrWZfDgwda8eXPX3a7njzcNwYiO2guio/Tp06e7gKKhEoUFhRY1nAoq4VBCftTTEI+dq9UYK0xpwnHY6CViToPeB/VYrFmzJjLE869//cuFBk2IPR0Kev8V0FQWff4UZKSwCbcnS39z7s+lhinDz0g0fRb37NnjepvUa1XSGOIBgP/rstdcE43/R9N4vnoUNJ9Cjae6x1977bU8k2Q1v2HcuHFueEUreObPn+/meYiGiDS/Rb0Tn3zyib344ouuwVJXeTxo5YUals8//9wdlWveh+bBRAek3NRrouClyaCdO3eO3KajeK0oKewoWY2XJsF+9tln9tVXXxW73Bpy0TDFH/7wBzeEoMmeqqt4U4OsFTcaXtE8Cw1laIKrVlvp9kQr7P1XL5wCnya36j7NldGE2XhRb8zw4cNdGHvppZfc64Sfy9w0R0ZDQDrHjibcarKuPvv33XdfXENTURFQAOD/qGHP3UWuoQ81+goeWmqs+RqaKKmlotF+97vfuS9x3a+hFa2WUDd92D2u3gr1Suj51NW/aNEiN9YfD/3793dd81pdo6Gd888/363C0JFwYYFM5VKvUTgRUwFFPSMnmn+ielLjpfkmpzIXQfNg1Fi/8cYbrjxqQLVSJVHnimnbtq2ba6NGWD0Xet2ChtjiqbD3X/WnAKNAq94c9aRoYmq8aHWRetI0KVc9Nwon4WTX3DREpDpRT5o+Uwq5WtKtZeXhPJvTKSUoydMlFlN2drZb061Z7lqnHW+s4gGK5siRI24CXaLPh+A7386sCoiCpgLo6T6VfWHfCyfTftODAgAAvENAAQAA3mEVDwCcIs3HAHzzdj7LhZMJPSgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoACAB3Sa8YULFxb6GO20rH1SfCjfRx99ZB06dHBnCtXZSlHwEnTVXSI2QSztOA8KgMRYPvb0vdblI0/6n6ix1yZ7uUOBzh1x+eWX2zfffOP2UAmvh9Qgn3POOYXuaVIcX3zxhdWqVSvSqOk04e+99543jX90+eSBBx5wGwpqE7pwLx8gnggoAFAEaoi1d4g2XtNGb7fddpvbLK9Lly5xef60tDTzWe7yaffhHj16nNKOzEePHnU7+aJwR8toPTHEAwBFUK9ePddIq2fjzjvvdD83btyY72O1B6t2qf3LX/4SuU09IdpxOLRq1SpLTU21w4cP5xlC0XOLdkbW7bl3F9Zut3ou7YarHWqPHTt2UsNC2tQw+jn1u/6mESNGuB2G9Xfm3lU4unz6fcOGDW5XY/0ePvaDDz6wK664wipXruzKph6mgwcP5inLmDFj3G7QP/rRjyJDIK+88orbLVr/tl27dm736HXr1tkll1ziemi6d+9uX375ZYF/p3q8+vbt6+pdz9GsWTO3g3Honnvucbvznnnmma4HbNSoUTH1pr9B79Hzzz9vjRs3dq95++23u92dx40b5+pEnwGVPXe9TJ061ZVPr6vnjn7f87N582b3eL2Gdgm+6aab7Kuvvop5P4YMGeLep7p160Z2xS5rCCgAcBIUPpYsWWK7du2y9u3b5/sYNVrasj481bgaz61bt7reF83dkBUrVriGWA1mbmvXrnU///a3v7mhlVdffTVy3/Lly13vhX7Onj3bZs2a5S6nSs+lIZs1a9a4BlnhY+nSpfk+VmU6//zz7Xe/+537/e6777ZDhw65hlTDQAoW8+fPd+VXQxstIyPD9UbpuRcvXhwzZHT//fe70Fe+fHn71a9+5QLTk08+ae+88459/PHHNnr06ALLr8CxZcsWe/PNN11dKzSocQ9Vq1bN1ZMeo+ecPn26TZw4MeY5VK/693p/X3rpJZsxY4brJfrnP//p3q/HHnvMlVF1lPu1e/fubZs2bXIhqU+fPq4M
+dGwokKcwuf69evda2VlZdn111+f5/2oWLGi/f3vf7dp06ZZWcQQD4AySw1k7vkTOmLOT8OGDd3PnJwcO378uGvAFUIKoqPgZ5991v2+cuVK1yDpKFyhpXnz5u7nZZddlu+/VS+AqBci99CKAsDTTz9tZ5xxhnseNaBq9AcOHGin4oILLnAhQdT7oNfQ81555ZV5HqsyKUSo7sLyqcE/cuSIvfDCCy7oiJ7jmmuucQ27egpE9/3pT3+KDFmE+xgp5IQ9BZrfc8MNN7jX79Spk7ttwIABhQYxBUbVsXpcpEmTJjH3K1iEdJ9eb968eS4EhfS+qgdFYaZly5Zu7pHC1BtvvGHlypVzPT76WxQOo8PpddddZ7/5zW/c7w899JALX0899ZQ988wzecqpOlE5H3nkkchtes1GjRq5XiP18oTvwbhx46wsI6AAKLPUAOlIO5qOjm+88cY8j9VRvBouBRT1cKhnQMMhmouSH4UPNbQaltDRtwJLGFDU2L777rsxjWNRqedC4SSkoR4NrZwqBZRoet69e/cW+d+rx+DCCy+MhBNRuFCjr0Y+DCitW7fOdz5F9OtHPzb6tsLKo/dBvRjqgfnpT3/qhpI6duwYuf/ll1+2yZMnu14SDTt99913bk5RNAUXvcfRr6m6VjgprBzp6el5rhe0ake9LAo4+U0sVtnCgNK2bVsr6wgoAMosNabnnXdezG3qzs+P5oVoVU8YEhRkNB+hoICixlUBRuFEFz1WAUVH4BoC0fyH6Aa0qCpUqJBnOEkhoCBqXDUsFS2/OSsn+7zFFR1gCnp9vXZ+txVWHs3p+PTTT11vh3owNHlZ83M0XyczM9MNvTz44IOul6ZGjRqu9+SJJ54osAzha8a7XhSOwl6l3KLnKFUpoJ7KEuagAEAx6Mhac0oKooZMkz5fe+01+/DDD61z586ul0A9MBr60VBEQY1Q2MNQ0HDTydBwkeaJREvEOTlatGjhegc0FyWk+RPh0MjpoL+1X79+9uKLL9qkSZPsueeec7ert0qrje677z5X7xo+UZiJl9WrV+e5rvrIT5s2bdznQb01CsfRF0JJLAIKABSBuvX37NnjGjZNAP3zn/9sPXv2LPTfaFhHky21OkRd+mqsNW9lzpw5Bc4/Ea0W0YqQcALl/v37i11uTcjUZEzNDdmxY4ebZ6JVJPGmHgqdI0YBQc+vYYw77rjDrVAJh2wSSRNoFQY1mVYBQPOLwpCgQKI5Kuo10TCKhnoWLFgQt9fW50HzSDSHRPUbDgHmR706X3/9tZtjo540leett96y/v37xyWQliYEFAAoAvUCqAteR7pasnrrrbe6iZCFUQhRo5N7SW/u23LTBFQ1oupp0XLcEwWhwmhIQ6tMNN9Fq4YOHDhgv/71ry3etBpJDa0aX73OL37xCzfMokmhp4N6nUaOHOl6qRQC1cOlQCI/+9nPbNiwYS40KCyqR0V1Ei8aOtJr6bUVBBVKNck2P3o/1bOkz4DmymgoUMuJNXwYPdcFZilB7sHJJJCdne3GEHVUkXuSUzxkzrjbkk36gPElXQSUQVq1sXPnTjc/Q0fPQFmjoTz1xpTUFgTJ9r1wMu03cQ0AAHiHgAIAALzDMmMAAIopCWdJJA16UAAAgHcIKAAAwDsEFACnjG5uAPH+PiCgACi28DTghw8fLumiAPBE+H2Qe5uAk8UkWQDFppNh6QRT4eZpOllXuI8KgLLXc3L48GH3faDvhehNLYuDgALglGgDPDmZnW8BlF41a9aMfC+c1oCycuVKe/zxx23Dhg1uA6rcZ9BTgtJeBNOnT7d9+/a57ba1nbn2QgjpVMjao2HRokXu1L7aIvvJJ5/Md/tpAH5Tj4lOAa/9Y/LbJRdA2VGhQoVT7jkpdkDRTpUXXnih3XLLLXbttdfmuX/cuHFuD4nZs2e709xqvwPtBbFly5bIKW+1qZTCjbbE1heaNkkaNGiQzZ07Ny5/FIDTT19K8fpiAoCTDijdu3d3
l/yo90RbXN9///2Rza20cZJ2sly4cKH16dPHtm7d6nbo1C6O2vZatOHW1VdfbePHj3cbKQEAgLItrqt4tDmQtiPv2rVr5DZtCtS+fXvLzMx01/VT41NhOBE9XkM9a9asyfd5c3Jy3AZD0RcAAFB6xTWgKJyIekyi6Xp4n35qrDr31uK1a9eOPCa3sWPHuqATXho1ahTPYgMAAM8kxXlQRo4c6bZmDi+7d+8u6SIBAIBkCSjhsqKsrKyY23U9vE8/cy9H/O6779zKnoKWJaWmplr16tVjLgAAoPSKa0DRqh2FjIyMjMhtmi+iuSXp6enuun5q+bGWKYeWLVtmx48fd3NVAAAATnoVz8GDB+3jjz+OmRj7/vvvuzkkjRs3tqFDh9rDDz/sznsSLjPWypzwXCktWrSwq666ygYOHGjTpk1zy4yHDBniVviwggcAABQroKxfv94uv/zyyPXhw4e7n/369bNZs2bZiBEj3LlSdF4T9ZR07tzZLSsOz4Eic+bMcaGkS5cukRO16dwpAAAAkhIk4TakGjbSah5NmE3EfJTMGXdbskkfML6kiwAAQNza76RYxQMAAMoWAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAJT+gPL999/bqFGjrGnTpla5cmU799xz7aGHHrIgCCKP0e+jR4+2s846yz2ma9eutmPHjngXBQAAJKm4B5THHnvMpk6dak8//bRt3brVXR83bpw99dRTkcfo+uTJk23atGm2Zs0aq1KlinXr1s2OHDkS7+IAAIAkVD7eT/juu+9az549rUePHu56kyZN7KWXXrK1a9dGek8mTZpk999/v3ucvPDCC1a/fn1buHCh9enTJ95FAgAAZb0HpWPHjpaRkWHbt2931zdt2mSrVq2y7t27u+s7d+60PXv2uGGdUI0aNax9+/aWmZmZ73Pm5ORYdnZ2zAUAAJRece9Buffee12AaN68uZ1xxhluTsqYMWOsb9++7n6FE1GPSTRdD+/LbezYsfbggw/Gu6gAAKCs9KC88sorNmfOHJs7d65t3LjRZs+ebePHj3c/i2vkyJG2f//+yGX37t1xLTMAACjlPSi///3vXS9KOJekdevW9umnn7pekH79+llaWpq7PSsry63iCen6RRddlO9zpqamugsAACgb4t6DcvjwYStXLvZpNdRz/Phx97uWHyukaJ5KSENCWs2Tnp4e7+IAAIAkFPcelGuuucbNOWncuLGdf/759t5779mECRPslltucfenpKTY0KFD7eGHH7ZmzZq5wKLzpjRo0MB69eoV7+IAAIAkFPeAovOdKHDcfvvttnfvXhc8br31VndittCIESPs0KFDNmjQINu3b5917tzZlixZYpUqVYp3cQAAQBJKCaJP8ZokNCSkpcmaMFu9evW4P3/mjLst2aQPGF/SRQAAIG7tN3vxAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADA
OwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAACgbASUzz77zG688UarU6eOVa5c2Vq3bm3r16+P3B8EgY0ePdrOOussd3/Xrl1tx44diSgKAABIQnEPKN9884116tTJKlSoYG+++aZt2bLFnnjiCatVq1bkMePGjbPJkyfbtGnTbM2aNValShXr1q2bHTlyJN7FAQAASah8vJ/wscces0aNGtnMmTMjtzVt2jSm92TSpEl2//33W8+ePd1tL7zwgtWvX98WLlxoffr0iXeRAABAWe9Bef311+2SSy6x6667zurVq2cXX3yxTZ8+PXL/zp07bc+ePW5YJ1SjRg1r3769ZWZm5vucOTk5lp2dHXMBAAClV9wDyieffGJTp061Zs2a2VtvvWW33Xab3XnnnTZ79mx3v8KJqMckmq6H9+U2duxYF2LCi3poAABA6RX3gHL8+HFr06aNPfLII673ZNCgQTZw4EA336S4Ro4cafv3749cdu/eHdcyAwCAUh5QtDKnZcuWMbe1aNHCdu3a5X5PS0tzP7OysmIeo+vhfbmlpqZa9erVYy4AAKD0intA0Qqebdu2xdy2fft2O/vssyMTZhVEMjIyIvdrTolW86Snp8e7OAAAIAnFfRXPsGHDrGPHjm6I5/rrr7e1a9fac8895y6SkpJiQ4cOtYcfftjNU1FgGTVqlDVo0MB69eoV7+IAAIAkFPeA0q5dO1uwYIGbN/LHP/7RBRAtK+7bt2/kMSNGjLBDhw65+Sn79u2zzp0725IlS6xSpUrxLg4AAEhCKYFOTJJkNCSk1TyaMJuI+SiZM+62ZJM+YHxJFwEAgLi13+zFAwAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAACAshdQHn30UUtJSbGhQ4dGbjty5IgNHjzY6tSpY1WrVrXevXtbVlZWoosCAACSREIDyrp16+zZZ5+1Cy64IOb2YcOG2aJFi2z+/Pm2YsUK+/zzz+3aa69NZFEAAEASSVhAOXjwoPXt29emT59utWrVity+f/9+mzFjhk2YMMGuuOIKa9u2rc2cOdPeffddW716daKKAwAAkkjCAoqGcHr06GFdu3aNuX3Dhg127NixmNubN29ujRs3tszMzEQVBwAAJJHyiXjSefPm2caNG90QT2579uyxihUrWs2aNWNur1+/vrsvPzk5Oe4Sys7OTkCpAQBAqe1B2b17t9111102Z84cq1SpUlyec+zYsVajRo3IpVGjRnF5XgAAUEYCioZw9u7da23atLHy5cu7iybCTp482f2unpKjR4/avn37Yv6dVvGkpaXl+5wjR450c1fCi0IQAAAoveI+xNOlSxf74IMPYm7r37+/m2dyzz33uN6PChUqWEZGhlteLNu2bbNdu3ZZenp6vs+ZmprqLgAAoGyIe0CpVq2atWrVKua2KlWquHOehLcPGDDAhg8fbrVr17bq1avbHXfc4cJJhw4d4l0cAACQhBIySfZEJk6c
aOXKlXM9KJr82q1bN3vmmWdKoigAAKCsBpS333475romz06ZMsVdAAAAcmMvHgAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAASn9AGTt2rLVr186qVatm9erVs169etm2bdtiHnPkyBEbPHiw1alTx6pWrWq9e/e2rKyseBcFAAAkqbgHlBUrVrjwsXr1alu6dKkdO3bMfvrTn9qhQ4cijxk2bJgtWrTI5s+f7x7/+eef27XXXhvvogAAgCRVPt5PuGTJkpjrs2bNcj0pGzZssB//+Me2f/9+mzFjhs2dO9euuOIK95iZM2daixYtXKjp0KFDvIsEAACSTMLnoCiQSO3atd1PBRX1qnTt2jXymObNm1vjxo0tMzMz3+fIycmx7OzsmAsAACi9EhpQjh8/bkOHDrVOnTpZq1at3G179uyxihUrWs2aNWMeW79+fXdfQfNaatSoEbk0atQokcUGAAClOaBoLsrmzZtt3rx5p/Q8I0eOdD0x4WX37t1xKyMAACgDc1BCQ4YMscWLF9vKlSutYcOGkdvT0tLs6NGjtm/fvpheFK3i0X35SU1NdRcAAFA2xL0HJQgCF04WLFhgy5Yts6ZNm8bc37ZtW6tQoYJlZGREbtMy5F27dll6enq8iwMAAJJQ+UQM62iFzmuvvebOhRLOK9HckcqVK7ufAwYMsOHDh7uJs9WrV7c77rjDhRNW8AAAgIQElKlTp7qfP/nJT2Ju11Lim2++2f0+ceJEK1eunDtBm1bodOvWzZ555hneEQAAkJiAoiGeE6lUqZJNmTLFXQAAAHJjLx4AAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHinfEkXAPGROeNuSzbpA8aXdBEAAJ6iBwUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8U76kCwAkk8wZd1uySR8wvqSLAAAnjR4UAADgHQIKAADwDgEFAAB4h4ACAAC8U6IBZcqUKdakSROrVKmStW/f3tauXVuSxQEAAGV9Fc/LL79sw4cPt2nTprlwMmnSJOvWrZtt27bN6tWrV1LFAgDAyvrqPx9WAJZYD8qECRNs4MCB1r9/f2vZsqULKmeeeaY9//zzJVUkAABQlntQjh49ahs2bLCRI0dGbitXrpx17drVMjMz8zw+JyfHXUL79+93P7OzsxNSvkPf/v/XQuIk6v1LpGT8bCRjPQOlSTJ+byTquyN8ziAI/AwoX331lX3//fdWv379mNt1/aOPPsrz+LFjx9qDDz6Y5/ZGjRoltJxIsDueLukSlA3UMwDPvjsOHDhgNWrUSP4zyaqnRfNVQsePH7evv/7a6tSpYykpKaeU5BRydu/ebdWrV49TaZEf
6vr0oa5PH+r69KGuS0d9q+dE4aRBgwYnfGyJBJS6devaGWecYVlZWTG363paWlqex6emprpLtJo1a8atPKp8PvCnB3V9+lDXpw91ffpQ18lf3yfqOSnRSbIVK1a0tm3bWkZGRkyviK6np6eXRJEAAIBHSmyIR0M2/fr1s0suucQuvfRSt8z40KFDblUPAAAo20osoPzyl7+0L7/80kaPHm179uyxiy66yJYsWZJn4mwiadjogQceyDN8hPijrk8f6vr0oa5PH+q67NV3SlCUtT4AAACnEXvxAAAA7xBQAACAdwgoAADAOwQUAADgnTIdUKZMmWJNmjSxSpUquR2V165dW9JFSmrakqBdu3ZWrVo1tyN1r1693O7U0Y4cOWKDBw92ZwGuWrWq9e7dO88J+3DyHn30UXdW5aFDh0Zuo67j67PPPrMbb7zR1WflypWtdevWtn79+sj9Wm+gVYlnnXWWu197i+3YsaNEy5yMtA3KqFGjrGnTpq4ezz33XHvooYdi9m6hrotn5cqVds0117izuOr7YuHChTH3F6VedRb3vn37upO36YSpAwYMsIMHD1pCBGXUvHnzgooVKwbPP/988OGHHwYDBw4MatasGWRlZZV00ZJWt27dgpkzZwabN28O3n///eDqq68OGjduHBw8eDDymN/+9rdBo0aNgoyMjGD9+vVBhw4dgo4dO5ZouZPd2rVrgyZNmgQXXHBBcNddd0Vup67j5+uvvw7OPvvs4Oabbw7WrFkTfPLJJ8Fbb70VfPzxx5HHPProo0GNGjWChQsXBps2bQp+9rOfBU2bNg2+/fbbEi17shkzZkxQp06dYPHixcHOnTuD+fPnB1WrVg2efPLJyGOo6+J54403gvvuuy949dVXlfaCBQsWxNxflHq96qqrggsvvDBYvXp18M477wTnnXdecMMNNwSJUGYDyqWXXhoMHjw4cv37778PGjRoEIwdO7ZEy1Wa7N271/1PsGLFCnd93759QYUKFdwXTmjr1q3uMZmZmSVY0uR14MCBoFmzZsHSpUuDyy67LBJQqOv4uueee4LOnTsXeP/x48eDtLS04PHHH4/cpvcgNTU1eOmll05TKUuHHj16BLfcckvMbddee23Qt29f9zt1HR+5A0pR6nXLli3u361bty7ymDfffDNISUkJPvvssyDeyuQQz9GjR23Dhg2u+ypUrlw5dz0zM7NEy1aa7N+/3/2sXbu2+6k6P3bsWEy9N2/e3Bo3bky9F5OGcHr06BFTp0Jdx9frr7/uznp93XXXueHLiy++2KZPnx65f+fOne6Ek9H1rf1GNHRMfZ+cjh07um1Ptm/f7q5v2rTJVq1aZd27d3fXqevEKEq96qeGdfT/QkiPV/u5Zs2auJcpKXYzjrevvvrKjXPmPmutrn/00UclVq7SRHsraT5Ep06drFWrVu42ffi1D1PujR5V77oPJ2fevHm2ceNGW7duXZ77qOv4+uSTT2zq1Klui44//OEPrs7vvPNOV8fasiOs0/y+U6jvk3Pvvfe6nXQVqLWprL6rx4wZ4+Y9CHWdGEWpV/1UQI9Wvnx5dxCaiLovkwEFp+fIfvPmze7IB/GnLdDvuusuW7p0qZvkjcQHbh01PvLII+66elD0+Z42bZoLKIifV155xebMmWNz5861888/395//313sKOJndR12VImh3jq1q3rknnuFQ26npaWVmLlKi2GDBliixcvtuXLl1vDhg0jt6tuNby2b9++mMdT7ydPQzh79+61Nm3auCMYXVasWGGTJ092v+uoh7qOH61qaNmyZcxtLVq0sF27drnfwzrlO+XU/f73v3e9KH369HErpW666SYbNmyYWyUo1HViFKVe9VPfO9G+++47t7InEXVfJgOKumXbtm3rxjmjj5B0PT09vUTLlsw070rhZMGCBbZs2TK3TDCa6rxChQox9a5lyPqSp95PTpcuXeyDDz5wR5fhRUf46gYPf6eu40dDlbmXzGuOxNln
n+1+12ddX9DR9a1hCo3LU98n5/Dhw25OQzQdUOo7WqjrxChKveqnDnp0gBTSd73eG81VibugDC8z1uzkWbNmuZnJgwYNcsuM9+zZU9JFS1q33XabW6L29ttvB1988UXkcvjw4Zilr1p6vGzZMrf0NT093V1w6qJX8Qh1Hd+l3OXLl3dLYHfs2BHMmTMnOPPMM4MXX3wxZommvkNee+214B//+EfQs2dPlr4WQ79+/YIf/OAHkWXGWhJbt27dYMSIEZHHUNfFX/X33nvvuYua/wkTJrjfP/300yLXq5YZX3zxxW65/apVq9wqQpYZJ8BTTz3lvsB1PhQtO9a6bhSfPvD5XXRulJA+6LfffntQq1Yt9wX/85//3IUYxD+gUNfxtWjRoqBVq1buwKZ58+bBc889F3O/lmmOGjUqqF+/vntMly5dgm3btpVYeZNVdna2+xzru7lSpUrBOeec487dkZOTE3kMdV08y5cvz/c7WqGwqPX6r3/9ywUSnZumevXqQf/+/V3wSYQU/Sf+/TIAAADFVybnoAAAAL8RUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAABgvvl/GCXMjUDplB8AAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "plt.hist(global_values[:len(global_values)//2], alpha=0.5, label='Neps HB with uniform sampler',bins=10)\n", + "plt.hist(global_values[len(global_values)//2:], alpha=0.5, label='HB with uniform sampler',bins=10)\n", + "# plt.hist([v+2 for n,v in enumerate(global_values) if n % 4 == 2], alpha=0.5, label='Neps HB with prior sampler',bins=10)\n", + "# plt.hist([v+3 for n,v in enumerate(global_values) if n % 4 == 3], alpha=0.5, label='HB with prior sampler',bins=10)\n", + "plt.legend()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "70b97bfb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Count of 1 in algo 0: 81\n", + "Count of 3 in algo 0: 32\n", + "Count of 11 in algo 0: 9\n", + "Count of 33 in algo 0: 3\n", + "Count of 100 in algo 0: 1\n", + "Count of 1 in algo 1: 81\n", + "Count of 3 in algo 1: 32\n", + "Count of 11 in algo 1: 9\n", + "Count of 33 in algo 1: 3\n", + "Count of 100 in algo 1: 1\n" + ] + } + ], + "source": [ + "n_algos = 2\n", + "for i in range(n_algos):\n", + " for j in [v for v in range(1000) if v in global_values]:\n", + " le = len(global_values)//2\n", + " print(f\"Count of {j:<3} in algo {i}: \", global_values[le*i:le*(i+1)].count(j))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "938adc12", + "metadata": {}, + "outputs": [], + "source": [ + "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled, Integer, Fidelity\n", + "import neps\n", + "\n", + "# Define the NEPS space for the neural network architecture\n", + "class SimpleSpace(PipelineSpace):\n", + " i1 = Fidelity(Integer(1,100))\n", + " i2 = Integer(0,50)\n", + " i3 = Categorical(['a','b','c'])\n", + "global_values = []\n", + "def evaluate_pipeline(i1, i2, i3, *args, **kwargs):\n", + " # Dummy evaluation function\n", + " 
global_values.append(i1)\n", + " return {\"objective_to_minimize\": -i2/50 + i1,\n", + " \"cost\": i1}" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "bfd8f206", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n", + "0\n", + "Fidelities to spend: 1\n", + "Fidelities to spend: 2\n", + "0\n", + "0\n", + "Fidelities to spend: 3\n", + "0\n", + "0\n", + "Fidelities to spend: 4\n", + "0\n", + "0\n", + "Fidelities to spend: 5\n", + "0\n", + "0\n", + "Fidelities to spend: 6\n", + "0\n", + "0\n", + "Fidelities to spend: 7\n", + "0\n", + "0\n", + "Fidelities to spend: 8\n", + "0\n", + "0\n", + "Fidelities to spend: 9\n", + "0\n", + "0\n", + "Fidelities to spend: 10\n", + "0\n", + "0\n", + "Fidelities to spend: 11\n", + "0\n", + "0\n", + "Fidelities to spend: 12\n", + "0\n", + "0\n", + "Fidelities to spend: 13\n", + "0\n", + "0\n", + "Fidelities to spend: 14\n", + "0\n", + "0\n", + "Fidelities to spend: 15\n", + "0\n", + "0\n", + "Fidelities to spend: 16\n", + "0\n", + "0\n", + "Fidelities to spend: 17\n", + "0\n", + "0\n", + "Fidelities to spend: 18\n", + "0\n", + "0\n", + "Fidelities to spend: 19\n", + "0\n", + "0\n", + "Fidelities to spend: 20\n", + "0\n", + "0\n", + "Fidelities to spend: 21\n", + "0\n", + "0\n", + "Fidelities to spend: 22\n", + "0\n", + "0\n", + "Fidelities to spend: 23\n", + "0\n", + "0\n", + "Fidelities to spend: 24\n", + "0\n", + "0\n", + "Fidelities to spend: 25\n", + "0\n", + "0\n", + "Fidelities to spend: 26\n", + "0\n", + "0\n", + "Fidelities to spend: 27\n", + "0\n", + "0\n", + "Fidelities to spend: 28\n", + "0\n", + "0\n", + "Fidelities to spend: 29\n", + "0\n", + "0\n", + "Fidelities to spend: 30\n", + "0\n", + "0\n", + "Fidelities to spend: 31\n", + "0\n", + "0\n", + "Fidelities to spend: 32\n", + "0\n", + "0\n", + "Fidelities to spend: 33\n", + "0\n", + "0\n", + "Fidelities to spend: 34\n", + "0\n", + "0\n", + "Fidelities to spend: 35\n", + "0\n", + "0\n", + 
"Fidelities to spend: 36\n", + "0\n", + "0\n", + "Fidelities to spend: 37\n", + "0\n", + "0\n", + "Fidelities to spend: 38\n", + "0\n", + "0\n", + "Fidelities to spend: 39\n", + "0\n", + "0\n", + "Fidelities to spend: 40\n", + "0\n", + "0\n", + "Fidelities to spend: 41\n", + "0\n", + "0\n", + "Fidelities to spend: 42\n", + "0\n", + "0\n", + "Fidelities to spend: 43\n", + "0\n", + "0\n", + "Fidelities to spend: 44\n", + "0\n", + "0\n", + "Fidelities to spend: 45\n", + "0\n", + "0\n", + "Fidelities to spend: 46\n", + "0\n", + "0\n", + "Fidelities to spend: 47\n", + "0\n", + "0\n", + "Fidelities to spend: 48\n", + "0\n", + "0\n", + "Fidelities to spend: 49\n", + "0\n", + "0\n", + "Fidelities to spend: 50\n", + "0\n", + "0\n", + "Fidelities to spend: 51\n", + "0\n", + "0\n", + "Fidelities to spend: 52\n", + "0\n", + "0\n", + "Fidelities to spend: 53\n", + "0\n", + "0\n", + "Fidelities to spend: 54\n", + "0\n", + "0\n", + "Fidelities to spend: 55\n", + "0\n", + "0\n", + "Fidelities to spend: 56\n", + "0\n", + "0\n", + "Fidelities to spend: 57\n", + "0\n", + "0\n", + "Fidelities to spend: 58\n", + "0\n", + "0\n", + "Fidelities to spend: 59\n", + "0\n", + "0\n", + "Fidelities to spend: 60\n", + "0\n", + "0\n", + "Fidelities to spend: 61\n", + "0\n", + "0\n", + "Fidelities to spend: 62\n", + "0\n", + "0\n", + "Fidelities to spend: 63\n", + "0\n", + "0\n", + "Fidelities to spend: 64\n", + "0\n", + "0\n", + "Fidelities to spend: 65\n", + "0\n", + "0\n", + "Fidelities to spend: 66\n", + "0\n", + "0\n", + "Fidelities to spend: 67\n", + "0\n", + "0\n", + "Fidelities to spend: 68\n", + "0\n", + "0\n", + "Fidelities to spend: 69\n", + "0\n", + "0\n", + "Fidelities to spend: 70\n", + "0\n", + "0\n", + "Fidelities to spend: 71\n", + "0\n", + "0\n", + "Fidelities to spend: 72\n", + "0\n", + "0\n", + "Fidelities to spend: 73\n", + "0\n", + "0\n", + "Fidelities to spend: 74\n", + "0\n", + "0\n", + "Fidelities to spend: 75\n", + "0\n", + "0\n", + "Fidelities to spend: 76\n", + "0\n", + 
"0\n", + "Fidelities to spend: 77\n", + "0\n", + "0\n", + "Fidelities to spend: 78\n", + "0\n", + "0\n", + "Fidelities to spend: 79\n", + "0\n", + "0\n", + "Fidelities to spend: 80\n", + "0\n", + "0\n", + "Fidelities to spend: 81\n", + "0\n", + "0\n", + "Fidelities to spend: 82\n", + "1\n", + "1\n", + "Fidelities to spend: 83\n", + "Fidelities to spend: 84\n", + "Fidelities to spend: 85\n", + "1\n", + "1\n", + "Fidelities to spend: 86\n", + "Fidelities to spend: 87\n", + "Fidelities to spend: 88\n", + "1\n", + "1\n", + "Fidelities to spend: 89\n", + "Fidelities to spend: 90\n", + "Fidelities to spend: 91\n", + "1\n", + "1\n", + "Fidelities to spend: 92\n", + "Fidelities to spend: 93\n", + "Fidelities to spend: 94\n", + "1\n", + "1\n", + "Fidelities to spend: 95\n", + "Fidelities to spend: 96\n", + "Fidelities to spend: 97\n", + "1\n", + "1\n", + "Fidelities to spend: 98\n", + "Fidelities to spend: 99\n", + "Fidelities to spend: 100\n", + "1\n", + "1\n", + "Fidelities to spend: 101\n", + "Fidelities to spend: 102\n", + "Fidelities to spend: 103\n", + "1\n", + "1\n", + "Fidelities to spend: 104\n", + "Fidelities to spend: 105\n", + "Fidelities to spend: 106\n", + "1\n", + "1\n", + "Fidelities to spend: 107\n", + "Fidelities to spend: 108\n", + "Fidelities to spend: 109\n", + "1\n", + "1\n", + "Fidelities to spend: 110\n", + "Fidelities to spend: 111\n", + "Fidelities to spend: 112\n", + "1\n", + "1\n", + "Fidelities to spend: 113\n", + "Fidelities to spend: 114\n", + "Fidelities to spend: 115\n", + "1\n", + "1\n", + "Fidelities to spend: 116\n", + "Fidelities to spend: 117\n", + "Fidelities to spend: 118\n", + "1\n", + "1\n", + "Fidelities to spend: 119\n", + "Fidelities to spend: 120\n", + "Fidelities to spend: 121\n", + "1\n", + "1\n", + "Fidelities to spend: 122\n", + "Fidelities to spend: 123\n", + "Fidelities to spend: 124\n", + "1\n", + "1\n", + "Fidelities to spend: 125\n", + "Fidelities to spend: 126\n", + "Fidelities to spend: 127\n", + "1\n", + "1\n", + 
"Fidelities to spend: 128\n", + "Fidelities to spend: 129\n", + "Fidelities to spend: 130\n", + "1\n", + "1\n", + "Fidelities to spend: 131\n", + "Fidelities to spend: 132\n", + "Fidelities to spend: 133\n", + "1\n", + "1\n", + "Fidelities to spend: 134\n", + "Fidelities to spend: 135\n", + "Fidelities to spend: 136\n", + "1\n", + "1\n", + "Fidelities to spend: 137\n", + "Fidelities to spend: 138\n", + "Fidelities to spend: 139\n", + "1\n", + "1\n", + "Fidelities to spend: 140\n", + "Fidelities to spend: 141\n", + "Fidelities to spend: 142\n", + "1\n", + "1\n", + "Fidelities to spend: 143\n", + "Fidelities to spend: 144\n", + "Fidelities to spend: 145\n", + "1\n", + "1\n", + "Fidelities to spend: 146\n", + "Fidelities to spend: 147\n", + "Fidelities to spend: 148\n", + "1\n", + "1\n", + "Fidelities to spend: 149\n", + "Fidelities to spend: 150\n", + "Fidelities to spend: 151\n", + "1\n", + "1\n", + "Fidelities to spend: 152\n", + "Fidelities to spend: 153\n", + "Fidelities to spend: 154\n", + "1\n", + "1\n", + "Fidelities to spend: 155\n", + "Fidelities to spend: 156\n", + "Fidelities to spend: 157\n", + "1\n", + "1\n", + "Fidelities to spend: 158\n", + "Fidelities to spend: 159\n", + "Fidelities to spend: 160\n", + "1\n", + "1\n", + "Fidelities to spend: 161\n", + "Fidelities to spend: 162\n", + "Fidelities to spend: 163\n", + "2\n", + "2\n", + "Fidelities to spend: 164\n", + "Fidelities to spend: 165\n", + "Fidelities to spend: 166\n", + "Fidelities to spend: 167\n", + "Fidelities to spend: 168\n", + "Fidelities to spend: 169\n", + "Fidelities to spend: 170\n", + "Fidelities to spend: 171\n", + "Fidelities to spend: 172\n", + "Fidelities to spend: 173\n", + "Fidelities to spend: 174\n", + "2\n", + "2\n", + "Fidelities to spend: 175\n", + "Fidelities to spend: 176\n", + "Fidelities to spend: 177\n", + "Fidelities to spend: 178\n", + "Fidelities to spend: 179\n", + "Fidelities to spend: 180\n", + "Fidelities to spend: 181\n", + "Fidelities to spend: 182\n", + 
"Fidelities to spend: 183\n", + "Fidelities to spend: 184\n", + "Fidelities to spend: 185\n", + "2\n", + "2\n", + "Fidelities to spend: 186\n", + "Fidelities to spend: 187\n", + "Fidelities to spend: 188\n", + "Fidelities to spend: 189\n", + "Fidelities to spend: 190\n", + "Fidelities to spend: 191\n", + "Fidelities to spend: 192\n", + "Fidelities to spend: 193\n", + "Fidelities to spend: 194\n", + "Fidelities to spend: 195\n", + "Fidelities to spend: 196\n", + "2\n", + "2\n", + "Fidelities to spend: 197\n", + "Fidelities to spend: 198\n", + "Fidelities to spend: 199\n", + "Fidelities to spend: 200\n", + "Fidelities to spend: 201\n", + "Fidelities to spend: 202\n", + "Fidelities to spend: 203\n", + "Fidelities to spend: 204\n", + "Fidelities to spend: 205\n", + "Fidelities to spend: 206\n", + "Fidelities to spend: 207\n", + "2\n", + "2\n", + "Fidelities to spend: 208\n", + "Fidelities to spend: 209\n", + "Fidelities to spend: 210\n", + "Fidelities to spend: 211\n", + "Fidelities to spend: 212\n", + "Fidelities to spend: 213\n", + "Fidelities to spend: 214\n", + "Fidelities to spend: 215\n", + "Fidelities to spend: 216\n", + "Fidelities to spend: 217\n", + "Fidelities to spend: 218\n", + "2\n", + "2\n", + "Fidelities to spend: 219\n", + "Fidelities to spend: 220\n", + "Fidelities to spend: 221\n", + "Fidelities to spend: 222\n", + "Fidelities to spend: 223\n", + "Fidelities to spend: 224\n", + "Fidelities to spend: 225\n", + "Fidelities to spend: 226\n", + "Fidelities to spend: 227\n", + "Fidelities to spend: 228\n", + "Fidelities to spend: 229\n", + "2\n", + "2\n", + "Fidelities to spend: 230\n", + "Fidelities to spend: 231\n", + "Fidelities to spend: 232\n", + "Fidelities to spend: 233\n", + "Fidelities to spend: 234\n", + "Fidelities to spend: 235\n", + "Fidelities to spend: 236\n", + "Fidelities to spend: 237\n", + "Fidelities to spend: 238\n", + "Fidelities to spend: 239\n", + "Fidelities to spend: 240\n", + "2\n", + "2\n", + "Fidelities to spend: 241\n", + 
"Fidelities to spend: 242\n", + "Fidelities to spend: 243\n", + "Fidelities to spend: 244\n", + "Fidelities to spend: 245\n", + "Fidelities to spend: 246\n", + "Fidelities to spend: 247\n", + "Fidelities to spend: 248\n", + "Fidelities to spend: 249\n", + "Fidelities to spend: 250\n", + "Fidelities to spend: 251\n", + "2\n", + "2\n", + "Fidelities to spend: 252\n", + "Fidelities to spend: 253\n", + "Fidelities to spend: 254\n", + "Fidelities to spend: 255\n", + "Fidelities to spend: 256\n", + "Fidelities to spend: 257\n", + "Fidelities to spend: 258\n", + "Fidelities to spend: 259\n", + "Fidelities to spend: 260\n", + "Fidelities to spend: 261\n", + "Fidelities to spend: 262\n", + "3\n", + "3\n", + "Fidelities to spend: 263\n", + "Fidelities to spend: 264\n", + "Fidelities to spend: 265\n", + "Fidelities to spend: 266\n", + "Fidelities to spend: 267\n", + "Fidelities to spend: 268\n", + "Fidelities to spend: 269\n", + "Fidelities to spend: 270\n", + "Fidelities to spend: 271\n", + "Fidelities to spend: 272\n", + "Fidelities to spend: 273\n", + "Fidelities to spend: 274\n", + "Fidelities to spend: 275\n", + "Fidelities to spend: 276\n", + "Fidelities to spend: 277\n", + "Fidelities to spend: 278\n", + "Fidelities to spend: 279\n", + "Fidelities to spend: 280\n", + "Fidelities to spend: 281\n", + "Fidelities to spend: 282\n", + "Fidelities to spend: 283\n", + "Fidelities to spend: 284\n", + "Fidelities to spend: 285\n", + "Fidelities to spend: 286\n", + "Fidelities to spend: 287\n", + "Fidelities to spend: 288\n", + "Fidelities to spend: 289\n", + "Fidelities to spend: 290\n", + "Fidelities to spend: 291\n", + "Fidelities to spend: 292\n", + "Fidelities to spend: 293\n", + "Fidelities to spend: 294\n", + "Fidelities to spend: 295\n", + "3\n", + "3\n", + "Fidelities to spend: 296\n", + "Fidelities to spend: 297\n", + "Fidelities to spend: 298\n", + "Fidelities to spend: 299\n", + "Fidelities to spend: 300\n", + "Fidelities to spend: 301\n", + "Fidelities to spend: 
302\n", + "Fidelities to spend: 303\n", + "Fidelities to spend: 304\n", + "Fidelities to spend: 305\n", + "Fidelities to spend: 306\n", + "Fidelities to spend: 307\n", + "Fidelities to spend: 308\n", + "Fidelities to spend: 309\n", + "Fidelities to spend: 310\n", + "Fidelities to spend: 311\n", + "Fidelities to spend: 312\n", + "Fidelities to spend: 313\n", + "Fidelities to spend: 314\n", + "Fidelities to spend: 315\n", + "Fidelities to spend: 316\n", + "Fidelities to spend: 317\n", + "Fidelities to spend: 318\n", + "Fidelities to spend: 319\n", + "Fidelities to spend: 320\n", + "Fidelities to spend: 321\n", + "Fidelities to spend: 322\n", + "Fidelities to spend: 323\n", + "Fidelities to spend: 324\n", + "Fidelities to spend: 325\n", + "Fidelities to spend: 326\n", + "Fidelities to spend: 327\n", + "Fidelities to spend: 328\n", + "3\n", + "3\n", + "Fidelities to spend: 329\n", + "Fidelities to spend: 330\n", + "Fidelities to spend: 331\n", + "Fidelities to spend: 332\n", + "Fidelities to spend: 333\n", + "Fidelities to spend: 334\n", + "Fidelities to spend: 335\n", + "Fidelities to spend: 336\n", + "Fidelities to spend: 337\n", + "Fidelities to spend: 338\n", + "Fidelities to spend: 339\n", + "Fidelities to spend: 340\n", + "Fidelities to spend: 341\n", + "Fidelities to spend: 342\n", + "Fidelities to spend: 343\n", + "Fidelities to spend: 344\n", + "Fidelities to spend: 345\n", + "Fidelities to spend: 346\n", + "Fidelities to spend: 347\n", + "Fidelities to spend: 348\n", + "Fidelities to spend: 349\n", + "Fidelities to spend: 350\n", + "Fidelities to spend: 351\n", + "Fidelities to spend: 352\n", + "Fidelities to spend: 353\n", + "Fidelities to spend: 354\n", + "Fidelities to spend: 355\n", + "Fidelities to spend: 356\n", + "Fidelities to spend: 357\n", + "Fidelities to spend: 358\n", + "Fidelities to spend: 359\n", + "Fidelities to spend: 360\n", + "Fidelities to spend: 361\n", + "4\n", + "4\n", + "Fidelities to spend: 362\n", + "Fidelities to spend: 363\n", + 
"Fidelities to spend: 364\n", + "Fidelities to spend: 365\n", + "Fidelities to spend: 366\n", + "Fidelities to spend: 367\n", + "Fidelities to spend: 368\n", + "Fidelities to spend: 369\n", + "Fidelities to spend: 370\n", + "Fidelities to spend: 371\n", + "Fidelities to spend: 372\n", + "Fidelities to spend: 373\n", + "Fidelities to spend: 374\n", + "Fidelities to spend: 375\n", + "Fidelities to spend: 376\n", + "Fidelities to spend: 377\n", + "Fidelities to spend: 378\n", + "Fidelities to spend: 379\n", + "Fidelities to spend: 380\n", + "Fidelities to spend: 381\n", + "Fidelities to spend: 382\n", + "Fidelities to spend: 383\n", + "Fidelities to spend: 384\n", + "Fidelities to spend: 385\n", + "Fidelities to spend: 386\n", + "Fidelities to spend: 387\n", + "Fidelities to spend: 388\n", + "Fidelities to spend: 389\n", + "Fidelities to spend: 390\n", + "Fidelities to spend: 391\n", + "Fidelities to spend: 392\n", + "Fidelities to spend: 393\n", + "Fidelities to spend: 394\n", + "Fidelities to spend: 395\n", + "Fidelities to spend: 396\n", + "Fidelities to spend: 397\n", + "Fidelities to spend: 398\n", + "Fidelities to spend: 399\n", + "Fidelities to spend: 400\n", + "Fidelities to spend: 401\n", + "Fidelities to spend: 402\n", + "Fidelities to spend: 403\n", + "Fidelities to spend: 404\n", + "Fidelities to spend: 405\n", + "Fidelities to spend: 406\n", + "Fidelities to spend: 407\n", + "Fidelities to spend: 408\n", + "Fidelities to spend: 409\n", + "Fidelities to spend: 410\n", + "Fidelities to spend: 411\n", + "Fidelities to spend: 412\n", + "Fidelities to spend: 413\n", + "Fidelities to spend: 414\n", + "Fidelities to spend: 415\n", + "Fidelities to spend: 416\n", + "Fidelities to spend: 417\n", + "Fidelities to spend: 418\n", + "Fidelities to spend: 419\n", + "Fidelities to spend: 420\n", + "Fidelities to spend: 421\n", + "Fidelities to spend: 422\n", + "Fidelities to spend: 423\n", + "Fidelities to spend: 424\n", + "Fidelities to spend: 425\n", + "Fidelities to 
spend: 426\n", + "Fidelities to spend: 427\n", + "Fidelities to spend: 428\n", + "Fidelities to spend: 429\n", + "Fidelities to spend: 430\n", + "Fidelities to spend: 431\n", + "Fidelities to spend: 432\n", + "Fidelities to spend: 433\n", + "Fidelities to spend: 434\n", + "Fidelities to spend: 435\n", + "Fidelities to spend: 436\n", + "Fidelities to spend: 437\n", + "Fidelities to spend: 438\n", + "Fidelities to spend: 439\n", + "Fidelities to spend: 440\n", + "Fidelities to spend: 441\n", + "Fidelities to spend: 442\n", + "Fidelities to spend: 443\n", + "Fidelities to spend: 444\n", + "Fidelities to spend: 445\n", + "Fidelities to spend: 446\n", + "Fidelities to spend: 447\n", + "Fidelities to spend: 448\n", + "Fidelities to spend: 449\n", + "Fidelities to spend: 450\n", + "Fidelities to spend: 451\n", + "Fidelities to spend: 452\n", + "Fidelities to spend: 453\n", + "Fidelities to spend: 454\n", + "Fidelities to spend: 455\n", + "Fidelities to spend: 456\n", + "Fidelities to spend: 457\n", + "Fidelities to spend: 458\n", + "Fidelities to spend: 459\n", + "Fidelities to spend: 460\n", + "Fidelities to spend: 461\n", + "1\n", + "1\n", + "Fidelities to spend: 462\n", + "Fidelities to spend: 463\n", + "Fidelities to spend: 464\n", + "1\n", + "1\n", + "Fidelities to spend: 465\n", + "Fidelities to spend: 466\n", + "Fidelities to spend: 467\n", + "1\n", + "1\n", + "Fidelities to spend: 468\n", + "Fidelities to spend: 469\n", + "Fidelities to spend: 470\n", + "1\n", + "1\n", + "Fidelities to spend: 471\n", + "Fidelities to spend: 472\n", + "Fidelities to spend: 473\n", + "1\n", + "1\n", + "Fidelities to spend: 474\n", + "Fidelities to spend: 475\n", + "Fidelities to spend: 476\n", + "1\n", + "1\n", + "Fidelities to spend: 477\n", + "Fidelities to spend: 478\n", + "Fidelities to spend: 479\n", + "1\n", + "1\n", + "Fidelities to spend: 480\n", + "Fidelities to spend: 481\n", + "Fidelities to spend: 482\n", + "1\n", + "1\n", + "Fidelities to spend: 483\n", + "Fidelities to 
spend: 484\n", + "Fidelities to spend: 485\n", + "1\n", + "1\n", + "Fidelities to spend: 486\n", + "Fidelities to spend: 487\n", + "Fidelities to spend: 488\n", + "1\n", + "1\n", + "Fidelities to spend: 489\n", + "Fidelities to spend: 490\n", + "Fidelities to spend: 491\n", + "1\n", + "1\n", + "Fidelities to spend: 492\n", + "Fidelities to spend: 493\n", + "Fidelities to spend: 494\n", + "1\n", + "1\n", + "Fidelities to spend: 495\n", + "Fidelities to spend: 496\n", + "Fidelities to spend: 497\n", + "1\n", + "1\n", + "Fidelities to spend: 498\n", + "Fidelities to spend: 499\n", + "Fidelities to spend: 500\n", + "1\n", + "1\n", + "Fidelities to spend: 501\n", + "Fidelities to spend: 502\n", + "Fidelities to spend: 503\n", + "1\n", + "1\n", + "Fidelities to spend: 504\n", + "Fidelities to spend: 505\n", + "Fidelities to spend: 506\n", + "1\n", + "1\n", + "Fidelities to spend: 507\n", + "Fidelities to spend: 508\n", + "Fidelities to spend: 509\n", + "1\n", + "1\n", + "Fidelities to spend: 510\n", + "Fidelities to spend: 511\n", + "Fidelities to spend: 512\n", + "1\n", + "1\n", + "Fidelities to spend: 513\n", + "Fidelities to spend: 514\n", + "Fidelities to spend: 515\n", + "1\n", + "1\n", + "Fidelities to spend: 516\n", + "Fidelities to spend: 517\n", + "Fidelities to spend: 518\n", + "1\n", + "1\n", + "Fidelities to spend: 519\n", + "Fidelities to spend: 520\n", + "Fidelities to spend: 521\n", + "1\n", + "1\n", + "Fidelities to spend: 522\n", + "Fidelities to spend: 523\n", + "Fidelities to spend: 524\n", + "1\n", + "1\n", + "Fidelities to spend: 525\n", + "Fidelities to spend: 526\n", + "Fidelities to spend: 527\n", + "1\n", + "1\n", + "Fidelities to spend: 528\n", + "Fidelities to spend: 529\n", + "Fidelities to spend: 530\n", + "1\n", + "1\n", + "Fidelities to spend: 531\n", + "Fidelities to spend: 532\n", + "Fidelities to spend: 533\n", + "1\n", + "1\n", + "Fidelities to spend: 534\n", + "Fidelities to spend: 535\n", + "Fidelities to spend: 536\n", + "1\n", + 
"1\n", + "Fidelities to spend: 537\n", + "Fidelities to spend: 538\n", + "Fidelities to spend: 539\n", + "1\n", + "1\n", + "Fidelities to spend: 540\n", + "Fidelities to spend: 541\n", + "Fidelities to spend: 542\n", + "2\n", + "2\n", + "Fidelities to spend: 543\n", + "Fidelities to spend: 544\n", + "Fidelities to spend: 545\n", + "Fidelities to spend: 546\n", + "Fidelities to spend: 547\n", + "Fidelities to spend: 548\n", + "Fidelities to spend: 549\n", + "Fidelities to spend: 550\n", + "Fidelities to spend: 551\n", + "Fidelities to spend: 552\n", + "Fidelities to spend: 553\n", + "2\n", + "2\n", + "Fidelities to spend: 554\n", + "Fidelities to spend: 555\n", + "Fidelities to spend: 556\n", + "Fidelities to spend: 557\n", + "Fidelities to spend: 558\n", + "Fidelities to spend: 559\n", + "Fidelities to spend: 560\n", + "Fidelities to spend: 561\n", + "Fidelities to spend: 562\n", + "Fidelities to spend: 563\n", + "Fidelities to spend: 564\n", + "2\n", + "2\n", + "Fidelities to spend: 565\n", + "Fidelities to spend: 566\n", + "Fidelities to spend: 567\n", + "Fidelities to spend: 568\n", + "Fidelities to spend: 569\n", + "Fidelities to spend: 570\n", + "Fidelities to spend: 571\n", + "Fidelities to spend: 572\n", + "Fidelities to spend: 573\n", + "Fidelities to spend: 574\n", + "Fidelities to spend: 575\n", + "2\n", + "2\n", + "Fidelities to spend: 576\n", + "Fidelities to spend: 577\n", + "Fidelities to spend: 578\n", + "Fidelities to spend: 579\n", + "Fidelities to spend: 580\n", + "Fidelities to spend: 581\n", + "Fidelities to spend: 582\n", + "Fidelities to spend: 583\n", + "Fidelities to spend: 584\n", + "Fidelities to spend: 585\n", + "Fidelities to spend: 586\n", + "2\n", + "2\n", + "Fidelities to spend: 587\n", + "Fidelities to spend: 588\n", + "Fidelities to spend: 589\n", + "Fidelities to spend: 590\n", + "Fidelities to spend: 591\n", + "Fidelities to spend: 592\n", + "Fidelities to spend: 593\n", + "Fidelities to spend: 594\n", + "Fidelities to spend: 
595\n", + "Fidelities to spend: 596\n", + "Fidelities to spend: 597\n", + "2\n", + "2\n", + "Fidelities to spend: 598\n", + "Fidelities to spend: 599\n" + ] + } + ], + "source": [ + "import neps\n", + "from neps import algorithms\n", + "from functools import partial\n", + "import matplotlib.pyplot as plt\n", + "\n", + "global_values = []\n", + "eta=3\n", + "\n", + "neps.run(\n", + " evaluate_pipeline,\n", + " SimpleSpace(),\n", + " root_directory=\"neps_test_runs/algo_tests4\",\n", + " overwrite_root_directory=True,\n", + " optimizer=partial(algorithms.neps_hyperband, eta=eta),\n", + " fidelities_to_spend=1\n", + ")\n", + "\n", + "neps.run(\n", + " evaluate_pipeline,\n", + " SimpleSpace(),\n", + " root_directory=\"neps_test_runs/algo_tests5\",\n", + " overwrite_root_directory=True,\n", + " optimizer=partial(algorithms.hyperband, eta=eta),\n", + " fidelities_to_spend=1\n", + ")\n", + "\n", + "\n", + "for f in range(1,600):\n", + " print(f\"Fidelities to spend: {f}\")\n", + " # partial(algorithms.neps_hyperband, sampler=\"prior\", eta=eta)]: \n", + " # partial(algorithms.hyperband, sampler=\"prior\", eta=eta)]:\n", + " neps.run(\n", + " evaluate_pipeline,\n", + " SimpleSpace(),\n", + " root_directory=\"neps_test_runs/algo_tests4\",\n", + " overwrite_root_directory=False,\n", + " optimizer=partial(algorithms.neps_hyperband, eta=eta),\n", + " fidelities_to_spend=f\n", + " )\n", + "\n", + " neps.run(\n", + " evaluate_pipeline,\n", + " SimpleSpace(),\n", + " root_directory=\"neps_test_runs/algo_tests5\",\n", + " overwrite_root_directory=False,\n", + " optimizer=partial(algorithms.hyperband, eta=eta),\n", + " fidelities_to_spend=f\n", + " )\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "eb12134d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Count of 1 in algo 0: 81\n", + "Count of 3 in algo 0: 54\n", + "Count of 11 in algo 0: 15\n", + "Count of 33 in algo 0: 3\n", + "Count of 100 in algo 0: 
1\n", + "Count of 1 in algo 1: 81\n", + "Count of 3 in algo 1: 54\n", + "Count of 11 in algo 1: 15\n", + "Count of 33 in algo 1: 3\n", + "Count of 100 in algo 1: 1\n" + ] + } + ], + "source": [ + "n_algos = 2\n", + "for i in range(n_algos):\n", + " for j in [v for v in range(1000) if v in global_values]:\n", + " print(f\"Count of {j:<3} in algo {i}: \", [v for n,v in enumerate(global_values) if n % n_algos == i].count(j))\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "neural-pipeline-search (3.13.1)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/neps_examples/test_files/priors_test.ipynb b/neps_examples/test_files/priors_test.ipynb new file mode 100644 index 000000000..3145ecd3f --- /dev/null +++ b/neps_examples/test_files/priors_test.ipynb @@ -0,0 +1,400 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "180fcb7f", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "c:\\Users\\Amega\\Git\\neps\\.venv\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original pipeline:\n", + "PipelineSpace SimpleSpace with parameters:\n", + "\tint_param1 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.LOW)\n", + "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", + "\n", + "==================================================\n", + "After removing 'int_param1' (in-place):\n", + "PipelineSpace SimpleSpace with parameters:\n", + "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", + "\n", + "==================================================\n", + "After adding 'int_param1' (in-place):\n", + "PipelineSpace SimpleSpace with parameters:\n", + "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", + "\tint_param1 = Float(0.0, 1.0)\n", + "\n", + "==================================================\n", + "After removing 'int_param1' (in-place):\n", + "PipelineSpace SimpleSpace with parameters:\n", + "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", + "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn\n", + "import neps\n", + "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", + "\n", + "# Define the NEPS space for the neural network architecture\n", + "class SimpleSpace(PipelineSpace):\n", + " int_param1 = neps.Integer(1,100, prior=50, 
prior_confidence=\"low\")\n", + " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\")\n", + " int_param3 = neps.Integer(1,100, prior=50, prior_confidence=\"high\")\n", + "\n", + "class OtherSpace(PipelineSpace):\n", + " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\", log=False)\n", + "\n", + "# Test in-place operations\n", + "pipeline = SimpleSpace()\n", + "print(\"Original pipeline:\")\n", + "print(pipeline)\n", + "\n", + "print(\"\\n\" + \"=\"*50)\n", + "print(\"After removing 'int_param1' (in-place):\")\n", + "pipeline = pipeline.remove(\"int_param1\")\n", + "print(pipeline)\n", + "\n", + "print(\"\\n\" + \"=\"*50)\n", + "print(\"After adding 'int_param1' (in-place):\")\n", + "pipeline = pipeline.add(neps.Float(0.0, 1.0), \"int_param1\")\n", + "print(pipeline)\n", + "\n", + "print(\"\\n\" + \"=\"*50)\n", + "print(\"After removing 'int_param1' (in-place):\")\n", + "pipeline = pipeline.remove(\"int_param1\")\n", + "print(pipeline)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "84c7766b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + " Converted categorical_param\n", + "{'choices': ('a', 'b', 'c'),\n", + " 'prior': 0,\n", + " 'prior_confidence': }\n", + "\n", + " Pipeline categorical_param\n", + "{'choices': ('a', 'b', 'c'),\n", + " 'prior': 0,\n", + " 'prior_confidence': }\n", + "\n", + " Converted fidelity_param\n", + "{'min_value': 1, 'max_value': 10, 'log': False, 'prior': , 'prior_confidence': }\n", + "\n", + " Pipeline fidelity_param\n", + "{'min_value': 1, 'max_value': 10, 'log': False, 'prior': , 'prior_confidence': }\n", + "\n", + " Converted float_param\n", + "{'log': False,\n", + " 'max_value': 1.0,\n", + " 'min_value': 0.0,\n", + " 'prior': 0.5,\n", + " 'prior_confidence': }\n", + "\n", + " Pipeline float_param\n", + "{'log': False,\n", + " 'max_value': 1.0,\n", + " 'min_value': 0,\n", + " 'prior': 0.5,\n", + " 
'prior_confidence': }\n", + "\n", + " Converted int_param2\n", + "{'log': True,\n", + " 'max_value': 100,\n", + " 'min_value': 1,\n", + " 'prior': 50,\n", + " 'prior_confidence': }\n", + "\n", + " Pipeline int_param2\n", + "{'log': True,\n", + " 'max_value': 100,\n", + " 'min_value': 1,\n", + " 'prior': 50,\n", + " 'prior_confidence': }\n", + "PipelineSpace SimpleSpace with parameters:\n", + "\tint_param2 = Integer(1, 100, log, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", + "\tcategorical_param = Categorical(choices=('a', 'b', 'c'), prior=0, prior_confidence=ConfidenceLevel.HIGH)\n", + "\tfloat_param = Float(0, 1.0, prior=0.5, prior_confidence=ConfidenceLevel.HIGH)\n", + "\tfidelity_param = Fidelity(Integer(1, 10))\n" + ] + } + ], + "source": [ + "import neps\n", + "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", + "from neps.space.neps_spaces import sampling\n", + "from neps.space.neps_spaces import neps_space\n", + "from functools import partial\n", + "from pprint import pprint\n", + "\n", + "# Define the NEPS space for the neural network architecture\n", + "class SimpleSpace(PipelineSpace):\n", + " int_param2 = neps.Integer(1,100, prior=50, log=True, prior_confidence=\"medium\")\n", + " categorical_param = Categorical((\"a\", \"b\", \"c\"), prior=0, prior_confidence=\"high\")\n", + " float_param = neps.Float(0, 1.0, prior=0.5, prior_confidence=\"high\")\n", + " fidelity_param = neps.Fidelity(neps.Integer(1, 10))\n", + "\n", + "old_space = neps.SearchSpace({\n", + " \"int_param2\": neps.HPOInteger(1,100, log=True, prior=50, prior_confidence=\"medium\"),\n", + " \"categorical_param\": neps.HPOCategorical([\"a\", \"b\", \"c\"], prior=\"a\", prior_confidence=\"high\"),\n", + " \"float_param\": neps.HPOFloat(0, 1.0, prior=0.5, prior_confidence=\"high\"),\n", + " \"fidelity_param\": neps.HPOInteger(1, 10,is_fidelity=True)\n", + "})\n", + "pipeline = SimpleSpace()\n", + "converted_space = 
neps.space.neps_spaces.neps_space.convert_classic_to_neps_search_space(old_space)\n", + "\n", + "for name in converted_space.get_attrs().keys():\n", + " param = converted_space.get_attrs()[name]\n", + " print(\"\\n Converted\",name)\n", + " if isinstance(param, neps.Fidelity):\n", + " print(param._domain.get_attrs())\n", + " else:\n", + " pprint(param.get_attrs())\n", + "\n", + " param = pipeline.get_attrs()[name]\n", + " print(\"\\n Pipeline\",name)\n", + " if isinstance(param, neps.Fidelity):\n", + " print(param._domain.get_attrs())\n", + " else:\n", + " pprint(param.get_attrs())\n", + "\n", + "print(pipeline)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "59280930", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:neps.api:Starting neps.run using root directory results/fidelity_ignore_test\n", + "INFO:neps.runtime:Overwriting optimization directory 'results\\fidelity_ignore_test' as `overwrite_optimization_dir=True`.\n", + "INFO:neps.runtime:Summary files can be found in the “summary” folder inside the root directory: C:\\Users\\Amega\\Git\\neps\\neps_examples\\test_files\\results\\fidelity_ignore_test\\summary\n", + "INFO:neps.runtime:Using optimizer: neps_priorband\n", + "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 1_rung_0.\n", + "INFO:neps.state.pipeline_eval:Successful evaluation of '1_rung_0': 120.\n", + "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 1_rung_0 as State.SUCCESS.\n", + "INFO:neps.runtime:New best: trial 1_rung_0 with objective 120.0\n", + "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 2_rung_0.\n", + "INFO:neps.state.pipeline_eval:Successful evaluation of '2_rung_0': 104.\n", + "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 2_rung_0 as State.SUCCESS.\n", + "INFO:neps.runtime:New best: trial 2_rung_0 with objective 104.0\n", + "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 3_rung_0.\n", + 
"INFO:neps.state.pipeline_eval:Successful evaluation of '3_rung_0': 51.\n", + "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 3_rung_0 as State.SUCCESS.\n", + "INFO:neps.runtime:New best: trial 3_rung_0 with objective 51.0\n", + "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 4_rung_0.\n", + "INFO:neps.state.pipeline_eval:Successful evaluation of '4_rung_0': 88.\n", + "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 4_rung_0 as State.SUCCESS.\n", + "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 5_rung_0.\n", + "INFO:neps.state.pipeline_eval:Successful evaluation of '5_rung_0': 129.\n", + "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 5_rung_0 as State.SUCCESS.\n", + "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 6_rung_0.\n", + "INFO:neps.state.pipeline_eval:Successful evaluation of '6_rung_0': 118.\n", + "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 6_rung_0 as State.SUCCESS.\n", + "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 7_rung_0.\n", + "INFO:neps.state.pipeline_eval:Successful evaluation of '7_rung_0': 30.\n", + "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 7_rung_0 as State.SUCCESS.\n", + "INFO:neps.runtime:New best: trial 7_rung_0 with objective 30.0\n", + "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 8_rung_0.\n", + "INFO:neps.state.pipeline_eval:Successful evaluation of '8_rung_0': 109.\n", + "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 8_rung_0 as State.SUCCESS.\n", + "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 9_rung_0.\n", + "INFO:neps.state.pipeline_eval:Successful evaluation of '9_rung_0': 148.\n", + "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 9_rung_0 as State.SUCCESS.\n", + "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 7_rung_1.\n", + "INFO:neps.state.pipeline_eval:Successful evaluation of '7_rung_1': 32.\n", + "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 7_rung_1 as State.SUCCESS.\n", + "INFO:neps.runtime:The total 
number of fidelity evaluations has reached the maximum allowed of `self.settings.fidelities_to_spend=10`. To allow more evaluations, increase this value or use a different stopping criterion.\n", + "INFO:neps.api:The summary folder has been created, which contains csv and txt files withthe output of all data in the run (short.csv - only the best; full.csv - all runs; best_config_trajectory.txt for incumbent trajectory; and best_config.txt for final incumbent).\n", + "You can find summary folder at: results\\fidelity_ignore_test\\summary.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Configs: 10\n", + "\n", + " success: 10\n", + "\n", + "# Best Found (config 7_rung_0):\n", + "\n", + " objective_to_minimize: 30.0\n", + " config: int_param1\n", + " 20\n", + " \t01 :: 20\n", + " config: fidelity_param\n", + " 1\n", + " \t01 :: 1\n", + " config: categorical_param\n", + " b\n", + " \t01 :: b\n", + " config: operation\n", + " Conv2D(7)\n", + " \t01 :: Conv2D\n", + " config: operation2\n", + " Sequential(Conv2d(3, 16, 3))\n", + " \t01 :: Sequential\n", + " \t\t02 :: Conv2d\n", + " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\test_files\\results\\fidelity_ignore_test\\configs\\config_7_rung_0\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn\n", + "import neps\n", + "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", + "from neps.space.neps_spaces import sampling\n", + "from neps.space.neps_spaces import neps_space\n", + "from functools import partial\n", + "\n", + "# Define the NEPS space for the neural network architecture\n", + "class SimpleSpace(PipelineSpace):\n", + " int_param1 = neps.Integer(1,100)#, prior=50, prior_confidence=\"low\")\n", + " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\")\n", + " int_param3 = neps.Integer(1,100, prior=50, prior_confidence=\"high\")\n", + " int_param4 = neps.Integer(1,3, 
prior=2, prior_confidence=\"low\")\n", + " categorical_param = Categorical((\"a\", \"b\", int_param1))\n", + " float_param = neps.Float(0, 1.0, prior=0.5, prior_confidence=\"high\")\n", + " fidelity_param = neps.Fidelity(neps.Integer(1, 10))\n", + " operation = neps.Operation(\n", + " \"Conv2D\",\n", + " args=neps.Resampled(int_param1),\n", + " kwargs={\n", + " \"kernel_size\": neps.Resampled(int_param4),\n", + " }\n", + " )\n", + " conv = neps.Operation(\n", + " nn.Conv2d,\n", + " args=(3,16,3),\n", + " kwargs={\n", + " \"stride\": neps.Resampled(int_param4),\n", + " }\n", + " )\n", + " operation2 = neps.Operation(\n", + " nn.Sequential,\n", + " args=(neps.Resampled(conv),),\n", + " )\n", + "\n", + "# Sampling and printing one random configuration of the pipeline\n", + "pipeline = SimpleSpace()\n", + "\n", + "def evaluate_pipeline(int_param1, int_param2, fidelity_param, categorical_param, **kwargs):\n", + " # print(kwargs)\n", + " return int_param1 + int_param2 + fidelity_param\n", + "\n", + "for i in range(1):\n", + " # resolved_pipeline, resolution_context = neps_space.resolve(pipeline,domain_sampler=sampler)\n", + " new_rs=neps.algorithms.NePSRandomSearch(pipeline,ignore_fidelity=True)\n", + " # old_rs=neps.algorithms.random_search(pipeline,ignore_fidelity=True)\n", + " # print(new_rs({},None))\n", + "\n", + " # s = resolved_pipeline.int_param1\n", + " # print(resolved_pipeline.get_attrs())\n", + " import logging\n", + "\n", + " logging.basicConfig(level=logging.INFO)\n", + " neps.run(\n", + " evaluate_pipeline,\n", + " pipeline,\n", + " root_directory=\"results/fidelity_ignore_test\",\n", + " overwrite_root_directory=True,\n", + " optimizer=neps.algorithms.neps_priorband,\n", + " fidelities_to_spend=10\n", + " )\n", + " neps.status(\"results/fidelity_ignore_test\",print_summary=True, pipeline_space_variables=(SimpleSpace(),[\"int_param1\", \"fidelity_param\", \"categorical_param\", \"operation\", \"operation2\"]))" + ] + }, + { + "cell_type": "code", + 
"execution_count": 4, + "id": "94af7ec4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "('D',)\n", + "('D',)\n" + ] + } + ], + "source": [ + "from neps.space.neps_spaces import parameters, sampling, neps_space\n", + "\n", + "class TestSpace(parameters.PipelineSpace):\n", + " cat_var = parameters.Categorical((\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\"))\n", + " cat_var_choice_2 = parameters.Categorical(\n", + " (\n", + " (\n", + " parameters.Resampled(cat_var),\n", + " ),\n", + " )\n", + " )\n", + " reresampled_var = parameters.Resampled(cat_var_choice_2)\n", + " reresampled_var2 = parameters.Resampled(cat_var_choice_2)\n", + "\n", + "random_sampler = sampling.RandomSampler({})\n", + "sampler = sampling.PriorOrFallbackSampler(fallback_sampler=random_sampler, always_use_prior=False)\n", + "\n", + "resolved_pipeline, resolution_context = neps_space.resolve(TestSpace(),domain_sampler=random_sampler)\n", + "print(resolved_pipeline.reresampled_var)\n", + "print(resolved_pipeline.reresampled_var2)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "neural-pipeline-search (3.13.1)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/test_normalization_fix.py b/test_normalization_fix.py new file mode 100644 index 000000000..6bb0241ae --- /dev/null +++ b/test_normalization_fix.py @@ -0,0 +1,38 @@ +"""Test script for normalization with PipelineSpace.""" + +import neps +from neps.normalization import _normalize_imported_config + + +class TestSpace(neps.PipelineSpace): + x = neps.Float(0, 1) + y = neps.Integer(0, 10) + epochs = neps.Fidelity(neps.Integer(1, 10)) + + +space = TestSpace() 
+ +# Config with correct SAMPLING__ and ENVIRONMENT__ keys, plus an extra invalid key +config = { + "SAMPLING__Resolvable.x::float__0_1_False": 0.5, + "SAMPLING__Resolvable.y::integer__0_10_False": 5, + "ENVIRONMENT__epochs": 3, + "extra_key": 999, # This should be removed +} + +print("Input config keys:", sorted(config.keys())) + +normalized = _normalize_imported_config(space, config) + +print("Normalized config keys:", sorted(normalized.keys())) +print("Extra key removed:", "extra_key" not in normalized) +print("\nAll expected keys present:") +print( + " - SAMPLING__Resolvable.x::float__0_1_False:", + "SAMPLING__Resolvable.x::float__0_1_False" in normalized, +) +print( + " - SAMPLING__Resolvable.y::integer__0_10_False:", + "SAMPLING__Resolvable.y::integer__0_10_False" in normalized, +) +print(" - ENVIRONMENT__epochs:", "ENVIRONMENT__epochs" in normalized) diff --git a/tests/test_neps_space/test_basic_functionality.py b/tests/test_neps_space/test_basic_functionality.py index c8c2d5f93..1083196d4 100644 --- a/tests/test_neps_space/test_basic_functionality.py +++ b/tests/test_neps_space/test_basic_functionality.py @@ -19,8 +19,8 @@ class SimpleSpace(PipelineSpace): """Simple space for testing.""" - x = Float(min_value=0.0, max_value=1.0) - y = Integer(min_value=1, max_value=10) + x = Float(lower=0.0, upper=1.0) + y = Integer(lower=1, upper=10) def simple_evaluation(x: float, y: int) -> float: diff --git a/tests/test_neps_space/test_domain__centering.py b/tests/test_neps_space/test_domain__centering.py index 37850427d..2d02a120b 100644 --- a/tests/test_neps_space/test_domain__centering.py +++ b/tests/test_neps_space/test_domain__centering.py @@ -24,12 +24,12 @@ def test_centering_integer( int_prior = 50 int1 = Integer( - min_value=1, - max_value=100, + lower=1, + upper=100, ) int2 = Integer( - min_value=1, - max_value=100, + lower=1, + upper=100, prior=int_prior, prior_confidence=confidence_level, ) @@ -41,13 +41,13 @@ def test_centering_integer( assert ( ( 
int1_centered.prior, - int1_centered.min_value, - int1_centered.max_value, + int1_centered.lower, + int1_centered.upper, ) == ( int2_centered.prior, - int2_centered.min_value, - int2_centered.max_value, + int2_centered.lower, + int2_centered.upper, ) == expected_prior_min_max ) @@ -78,12 +78,12 @@ def test_centering_float( float_prior = 50.0 float1 = Float( - min_value=1.0, - max_value=100.0, + lower=1.0, + upper=100.0, ) float2 = Float( - min_value=1.0, - max_value=100.0, + lower=1.0, + upper=100.0, prior=float_prior, prior_confidence=confidence_level, ) @@ -95,13 +95,13 @@ def test_centering_float( assert ( ( float1_centered.prior, - float1_centered.min_value, - float1_centered.max_value, + float1_centered.lower, + float1_centered.upper, ) == ( float2_centered.prior, - float2_centered.min_value, - float2_centered.max_value, + float2_centered.lower, + float2_centered.upper, ) == expected_prior_min_max ) @@ -111,7 +111,7 @@ def test_centering_float( @pytest.mark.parametrize( - ("confidence_level", "expected_prior_min_max_value"), + ("confidence_level", "expected_prior_min_upper"), [ (ConfidenceLevel.LOW, (40, 0, 80, 50)), (ConfidenceLevel.MEDIUM, (25, 0, 50, 50)), @@ -120,7 +120,7 @@ def test_centering_float( ) def test_centering_categorical( confidence_level, - expected_prior_min_max_value, + expected_prior_min_upper, ): # Construct domains manually and then with priors. # They are constructed in a way that after centering they both @@ -145,22 +145,22 @@ def test_centering_categorical( ) # During the centering of categorical objects, the prior index will change. 
- assert categorical_prior_index_original != expected_prior_min_max_value[0] + assert categorical_prior_index_original != expected_prior_min_upper[0] assert ( ( categorical1_centered.prior, - categorical1_centered.min_value, - categorical1_centered.max_value, + categorical1_centered.lower, + categorical1_centered.upper, categorical1_centered.choices[categorical1_centered.prior], ) == ( categorical2_centered.prior, - categorical2_centered.min_value, - categorical2_centered.max_value, + categorical2_centered.lower, + categorical2_centered.upper, categorical2_centered.choices[categorical2_centered.prior], ) - == expected_prior_min_max_value + == expected_prior_min_upper ) categorical1_centered.sample() @@ -180,14 +180,14 @@ def test_centering_stranger_ranges_integer( expected_prior_min_max, ): int1 = Integer( - min_value=1, - max_value=13, + lower=1, + upper=13, ) int1_centered = int1.centered_around(10, confidence_level) int2 = Integer( - min_value=1, - max_value=13, + lower=1, + upper=13, prior=10, prior_confidence=confidence_level, ) @@ -195,13 +195,13 @@ def test_centering_stranger_ranges_integer( assert ( int1_centered.prior, - int1_centered.min_value, - int1_centered.max_value, + int1_centered.lower, + int1_centered.upper, ) == expected_prior_min_max assert ( int2_centered.prior, - int2_centered.min_value, - int2_centered.max_value, + int2_centered.lower, + int2_centered.upper, ) == expected_prior_min_max int1_centered.sample() @@ -224,14 +224,14 @@ def test_centering_stranger_ranges_float( expected_prior_min_max, ): float1 = Float( - min_value=0.0, - max_value=1.0, + lower=0.0, + upper=1.0, ) float1_centered = float1.centered_around(0.5, confidence_level) float2 = Float( - min_value=0.0, - max_value=1.0, + lower=0.0, + upper=1.0, prior=0.5, prior_confidence=confidence_level, ) @@ -239,13 +239,13 @@ def test_centering_stranger_ranges_float( assert ( float1_centered.prior, - float1_centered.min_value, - float1_centered.max_value, + float1_centered.lower, + 
float1_centered.upper, ) == expected_prior_min_max assert ( float2_centered.prior, - float2_centered.min_value, - float2_centered.max_value, + float2_centered.lower, + float2_centered.upper, ) == expected_prior_min_max float1_centered.sample() @@ -253,7 +253,7 @@ def test_centering_stranger_ranges_float( @pytest.mark.parametrize( - ("confidence_level", "expected_prior_min_max_value"), + ("confidence_level", "expected_prior_min_upper"), [ (ConfidenceLevel.LOW, (2, 0, 5, 2)), (ConfidenceLevel.MEDIUM, (2, 0, 4, 2)), @@ -262,7 +262,7 @@ def test_centering_stranger_ranges_float( ) def test_centering_stranger_ranges_categorical( confidence_level, - expected_prior_min_max_value, + expected_prior_min_upper, ): categorical1 = Categorical( choices=tuple(range(7)), @@ -280,17 +280,17 @@ def test_centering_stranger_ranges_categorical( assert ( categorical1_centered.prior, - categorical1_centered.min_value, - categorical1_centered.max_value, + categorical1_centered.lower, + categorical1_centered.upper, categorical1_centered.choices[categorical1_centered.prior], - ) == expected_prior_min_max_value + ) == expected_prior_min_upper assert ( categorical2_centered.prior, - categorical2_centered.min_value, - categorical2_centered.max_value, + categorical2_centered.lower, + categorical2_centered.upper, categorical2_centered.choices[categorical2_centered.prior], - ) == expected_prior_min_max_value + ) == expected_prior_min_upper categorical1_centered.sample() categorical2_centered.sample() diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index 654dec671..d467a67c4 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -46,14 +46,14 @@ def hyperparameter_pipeline_to_optimize( class DemoHyperparameterSpace(PipelineSpace): float1 = Float( - min_value=0, - max_value=1, + lower=0, + upper=1, prior=0.1, prior_confidence=ConfidenceLevel.MEDIUM, ) float2 = Float( - 
min_value=-10, - max_value=10, + lower=-10, + upper=10, prior=0.1, prior_confidence=ConfidenceLevel.MEDIUM, ) @@ -63,14 +63,14 @@ class DemoHyperparameterSpace(PipelineSpace): prior_confidence=ConfidenceLevel.MEDIUM, ) integer1 = Integer( - min_value=0, - max_value=1, + lower=0, + upper=1, prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) integer2 = Integer( - min_value=1, - max_value=1000, + lower=1, + upper=1000, prior=10, prior_confidence=ConfidenceLevel.MEDIUM, ) @@ -78,14 +78,14 @@ class DemoHyperparameterSpace(PipelineSpace): class DemoHyperparameterWithFidelitySpace(PipelineSpace): float1 = Float( - min_value=0, - max_value=1, + lower=0, + upper=1, prior=0.1, prior_confidence=ConfidenceLevel.MEDIUM, ) float2 = Float( - min_value=-10, - max_value=10, + lower=-10, + upper=10, prior=0.1, prior_confidence=ConfidenceLevel.MEDIUM, ) @@ -95,29 +95,29 @@ class DemoHyperparameterWithFidelitySpace(PipelineSpace): prior_confidence=ConfidenceLevel.MEDIUM, ) integer1 = Integer( - min_value=0, - max_value=1, + lower=0, + upper=1, prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) integer2 = Fidelity( Integer( - min_value=1, - max_value=1000, + lower=1, + upper=1000, ), ) class DemoHyperparameterComplexSpace(PipelineSpace): _small_float = Float( - min_value=0, - max_value=1, + lower=0, + upper=1, prior=0.1, prior_confidence=ConfidenceLevel.MEDIUM, ) _big_float = Float( - min_value=10, - max_value=100, + lower=10, + upper=100, prior=20, prior_confidence=ConfidenceLevel.MEDIUM, ) @@ -145,14 +145,14 @@ class DemoHyperparameterComplexSpace(PipelineSpace): prior_confidence=ConfidenceLevel.MEDIUM, ) integer1 = Integer( - min_value=0, - max_value=1, + lower=0, + upper=1, prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) integer2 = Integer( - min_value=1, - max_value=1000, + lower=1, + upper=1000, prior=10, prior_confidence=ConfidenceLevel.MEDIUM, ) @@ -286,8 +286,8 @@ class DemoOperationSpace(PipelineSpace): # The way to sample `factor` values _factor = Float( - min_value=0, 
- max_value=1, + lower=0, + upper=1, prior=0.1, prior_confidence=ConfidenceLevel.MEDIUM, ) @@ -382,8 +382,8 @@ def test_pipeline_space_dynamic_methods(): # Create a basic space class BasicSpace(PipelineSpace): - x = Float(min_value=0.0, max_value=1.0) - y = Integer(min_value=1, max_value=10) + x = Float(lower=0.0, upper=1.0) + y = Integer(lower=1, upper=10) space = BasicSpace() @@ -499,8 +499,8 @@ def test_complex_neps_space_features(): class ComplexNepsSpace(PipelineSpace): # Basic parameters factor = Float( - min_value=0.1, - max_value=2.0, + lower=0.1, + upper=2.0, prior=1.0, prior_confidence=ConfidenceLevel.MEDIUM, ) @@ -543,8 +543,8 @@ def evaluate_with_metrics(x: float, y: int) -> dict: } class MetricsSpace(PipelineSpace): - x = Float(min_value=0.0, max_value=1.0) - y = Integer(min_value=1, max_value=10) + x = Float(lower=0.0, upper=1.0) + y = Integer(lower=1, upper=10) space = MetricsSpace() root_directory = tmp_path / "metrics_test" diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index ddafe81a1..7881bd1e7 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -35,28 +35,28 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): class DemoHyperparameterWithFidelitySpace(PipelineSpace): float1 = Float( - min_value=1, - max_value=1000, + lower=1, + upper=1000, log=False, prior=600, prior_confidence=ConfidenceLevel.MEDIUM, ) float2 = Float( - min_value=-100, - max_value=100, + lower=-100, + upper=100, prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) integer1 = Integer( - min_value=0, - max_value=500, + lower=0, + upper=500, prior=35, prior_confidence=ConfidenceLevel.LOW, ) fidelity = Fidelity( domain=Integer( - min_value=1, - max_value=100, + lower=1, + upper=100, ), ) diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py 
b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index cf21ac6b5..586675bd6 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -22,28 +22,28 @@ def evaluate_pipeline(float1, float2, integer1, fidelity): class DemoHyperparameterWithFidelitySpace(PipelineSpace): float1 = Float( - min_value=1, - max_value=1000, + lower=1, + upper=1000, log=False, prior=600, prior_confidence=ConfidenceLevel.MEDIUM, ) float2 = Float( - min_value=-100, - max_value=100, + lower=-100, + upper=100, prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) integer1 = Integer( - min_value=0, - max_value=500, + lower=0, + upper=500, prior=35, prior_confidence=ConfidenceLevel.LOW, ) fidelity = Fidelity( domain=Integer( - min_value=1, - max_value=100, + lower=1, + upper=100, ), ) diff --git a/tests/test_neps_space/test_pipeline_space_methods.py b/tests/test_neps_space/test_pipeline_space_methods.py index 5532257e9..3dbf58001 100644 --- a/tests/test_neps_space/test_pipeline_space_methods.py +++ b/tests/test_neps_space/test_pipeline_space_methods.py @@ -19,18 +19,16 @@ class BasicSpace(PipelineSpace): """Basic space for testing dynamic methods.""" - x = Float(min_value=0.0, max_value=1.0) - y = Integer(min_value=1, max_value=10) + x = Float(lower=0.0, upper=1.0) + y = Integer(lower=1, upper=10) z = Categorical(choices=("a", "b", "c")) class SpaceWithPriors(PipelineSpace): """Space with existing priors for testing.""" - x = Float( - min_value=0.0, max_value=1.0, prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM - ) - y = Integer(min_value=1, max_value=10, prior=5, prior_confidence=ConfidenceLevel.HIGH) + x = Float(lower=0.0, upper=1.0, prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM) + y = Integer(lower=1, upper=10, prior=5, prior_confidence=ConfidenceLevel.HIGH) z = Categorical( choices=("a", "b", "c"), prior=1, prior_confidence=ConfidenceLevel.LOW ) @@ -45,7 +43,7 @@ def 
test_add_method_basic(): original_attrs = space.get_attrs() # Add a new parameter - new_param = Float(min_value=10.0, max_value=20.0) + new_param = Float(lower=10.0, upper=20.0) updated_space = space.add(new_param, "new_float") # Original space should be unchanged @@ -63,7 +61,7 @@ def test_add_method_different_types(): space = BasicSpace() # Add Integer - space = space.add(Integer(min_value=0, max_value=100), "new_int") + space = space.add(Integer(lower=0, upper=100), "new_int") assert "new_int" in space.get_attrs() assert isinstance(space.get_attrs()["new_int"], Integer) @@ -91,7 +89,7 @@ def test_add_method_with_default_name(): original_count = len(space.get_attrs()) # Add without specifying name - new_param = Float(min_value=5.0, max_value=15.0) + new_param = Float(lower=5.0, upper=15.0) updated_space = space.add(new_param) updated_attrs = updated_space.get_attrs() @@ -119,7 +117,7 @@ def test_add_method_conflicting_parameter(): space = BasicSpace() # Try to add a different parameter with existing name - different_param = Integer(min_value=0, max_value=5) # Different from existing "x" + different_param = Integer(lower=0, upper=5) # Different from existing "x" with pytest.raises(ValueError, match="A different parameter with the name"): space.add(different_param, "x") @@ -131,8 +129,8 @@ def test_add_method_chaining(): # Chain multiple additions final_space = ( - space.add(Float(min_value=100.0, max_value=200.0), "param1") - .add(Integer(min_value=0, max_value=50), "param2") + space.add(Float(lower=100.0, upper=200.0), "param1") + .add(Integer(lower=0, upper=50), "param2") .add(Categorical(choices=(1, 2, 3)), "param3") ) @@ -301,11 +299,11 @@ def test_combined_operations(): # Complex chain of operations final_space = ( - space.add(Float(min_value=50.0, max_value=100.0), "new_param") + space.add(Float(lower=50.0, upper=100.0), "new_param") .remove("y") .add_prior("x", prior=0.25, prior_confidence=ConfidenceLevel.HIGH) .add_prior("new_param", prior=75.0, 
prior_confidence=ConfidenceLevel.MEDIUM) - .add(Integer(min_value=0, max_value=10), "another_param") + .add(Integer(lower=0, upper=10), "another_param") ) attrs = final_space.get_attrs() @@ -332,7 +330,7 @@ def test_immutability(): original_attrs = original_space.get_attrs() # Perform various operations - space1 = original_space.add(Float(min_value=0.0, max_value=1.0), "temp") + space1 = original_space.add(Float(lower=0.0, upper=1.0), "temp") space2 = original_space.remove("x") space3 = original_space.add_prior("y", prior=5, prior_confidence=ConfidenceLevel.HIGH) @@ -352,13 +350,13 @@ def test_fidelity_operations(): """Test operations with fidelity parameters.""" class FidelitySpace(PipelineSpace): - x = Float(min_value=0.0, max_value=1.0) - epochs = Fidelity(Integer(min_value=1, max_value=100)) + x = Float(lower=0.0, upper=1.0) + epochs = Fidelity(Integer(lower=1, upper=100)) space = FidelitySpace() # Add another parameter (non-fidelity since add doesn't support Fidelity directly) - new_param = Integer(min_value=1, max_value=50) + new_param = Integer(lower=1, upper=50) space = space.add(new_param, "batch_size") # Check that original fidelity is preserved @@ -384,7 +382,7 @@ def test_space_string_representation(): # Perform operations modified_space = ( - space.add(Float(min_value=10.0, max_value=20.0), "added_param") + space.add(Float(lower=10.0, upper=20.0), "added_param") .remove("y") .add_prior("x", prior=0.8, prior_confidence=ConfidenceLevel.LOW) ) diff --git a/tests/test_neps_space/test_search_space__fidelity.py b/tests/test_neps_space/test_search_space__fidelity.py index 820e54191..ce19f49a7 100644 --- a/tests/test_neps_space/test_search_space__fidelity.py +++ b/tests/test_neps_space/test_search_space__fidelity.py @@ -18,15 +18,15 @@ class DemoHyperparametersWithFidelitySpace(PipelineSpace): constant1: int = 42 float1 = Float( - min_value=0, - max_value=1, + lower=0, + upper=1, prior=0.1, prior_confidence=ConfidenceLevel.MEDIUM, ) fidelity_integer1 = 
Fidelity( domain=Integer( - min_value=1, - max_value=1000, + lower=1, + upper=1000, ), ) @@ -39,8 +39,8 @@ def test_fidelity_creation_raises_when_domain_has_prior(): ): Fidelity( domain=Integer( - min_value=1, - max_value=1000, + lower=1, + upper=1000, prior=10, prior_confidence=ConfidenceLevel.MEDIUM, ), diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index 4bf6f47d7..08ac1d4c3 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -25,7 +25,7 @@ class HNASLikePipeline(PipelineSpace): # Adding `PReLU` with a float hyperparameter `init` # Note that the sampled `_prelu_init_value` will be shared across all `_PRELU` uses, # since no `Resampled` was requested for it - _prelu_init_value = Float(min_value=0.1, max_value=0.9) + _prelu_init_value = Float(lower=0.1, upper=0.9) _PRELU = Operation( operator="ACT prelu", kwargs={"init": _prelu_init_value}, diff --git a/tests/test_neps_space/test_search_space__recursion.py b/tests/test_neps_space/test_search_space__recursion.py index f2ca9d60d..4f65b1e01 100644 --- a/tests/test_neps_space/test_search_space__recursion.py +++ b/tests/test_neps_space/test_search_space__recursion.py @@ -39,7 +39,7 @@ def __call__(self, values: Sequence[float]) -> float: class DemoRecursiveOperationSpace(PipelineSpace): # The way to sample `factor` values - _factor = Float(min_value=0, max_value=1) + _factor = Float(lower=0, upper=1) # Sum _sum = Operation(operator=Sum) diff --git a/tests/test_neps_space/test_search_space__resampled.py b/tests/test_neps_space/test_search_space__resampled.py index fc4fe3450..4fba5a874 100644 --- a/tests/test_neps_space/test_search_space__resampled.py +++ b/tests/test_neps_space/test_search_space__resampled.py @@ -16,8 +16,8 @@ class ActPipelineSimpleFloat(PipelineSpace): prelu_init_value = Float( - min_value=0, - max_value=1000000, + lower=0, + upper=1000000, 
log=False, prior=0.25, prior_confidence=ConfidenceLevel.LOW, @@ -53,7 +53,7 @@ class ActPipelineSimpleFloat(PipelineSpace): class ActPipelineComplexInteger(PipelineSpace): - prelu_init_value = Integer(min_value=0, max_value=1000000) + prelu_init_value = Integer(lower=0, upper=1000000) prelu_shared1 = Operation( operator="prelu", diff --git a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index 942cc2185..ef2a8b222 100644 --- a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -31,7 +31,7 @@ class ActPipelineSimple(PipelineSpace): class ActPipelineComplex(PipelineSpace): - prelu_init_value: float = Float(min_value=0.1, max_value=0.9) + prelu_init_value: float = Float(lower=0.1, upper=0.9) prelu = Operation( operator="prelu", kwargs={"init": prelu_init_value}, @@ -61,8 +61,8 @@ class FixedPipeline(PipelineSpace): class ConvPipeline(PipelineSpace): conv_choices_prior_index: int = Integer( - min_value=0, - max_value=1, + lower=0, + upper=1, log=False, prior=0, prior_confidence=ConfidenceLevel.LOW, @@ -118,7 +118,7 @@ class CellPipeline(PipelineSpace): ) _some_int = 2 - _some_float = Float(min_value=0.5, max_value=0.5) + _some_float = Float(lower=0.5, upper=0.5) cell = Operation( operator="cell", diff --git a/tests/test_neps_space/test_space_conversion_and_compatibility.py b/tests/test_neps_space/test_space_conversion_and_compatibility.py index 360c57374..be908859f 100644 --- a/tests/test_neps_space/test_space_conversion_and_compatibility.py +++ b/tests/test_neps_space/test_space_conversion_and_compatibility.py @@ -26,10 +26,8 @@ class SimpleHPOSpace(PipelineSpace): """Simple hyperparameter-only space that can be converted to classic.""" - x = Float( - min_value=0.0, max_value=1.0, prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM - ) - y = Integer(min_value=1, max_value=10, prior=5, 
prior_confidence=ConfidenceLevel.HIGH) + x = Float(lower=0.0, upper=1.0, prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM) + y = Integer(lower=1, upper=10, prior=5, prior_confidence=ConfidenceLevel.HIGH) z = Categorical( choices=("a", "b", "c"), prior=1, prior_confidence=ConfidenceLevel.LOW ) @@ -38,11 +36,9 @@ class SimpleHPOSpace(PipelineSpace): class SimpleHPOWithFidelitySpace(PipelineSpace): """Simple hyperparameter space with fidelity.""" - x = Float( - min_value=0.0, max_value=1.0, prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM - ) - y = Integer(min_value=1, max_value=10, prior=5, prior_confidence=ConfidenceLevel.HIGH) - epochs = Fidelity(Integer(min_value=1, max_value=100)) + x = Float(lower=0.0, upper=1.0, prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM) + y = Integer(lower=1, upper=10, prior=5, prior_confidence=ConfidenceLevel.HIGH) + epochs = Fidelity(Integer(lower=1, upper=100)) class ComplexNepsSpace(PipelineSpace): @@ -50,7 +46,7 @@ class ComplexNepsSpace(PipelineSpace): # Basic parameters factor = Float( - min_value=0.1, max_value=2.0, prior=1.0, prior_confidence=ConfidenceLevel.MEDIUM + lower=0.1, upper=2.0, prior=1.0, prior_confidence=ConfidenceLevel.MEDIUM ) # Operation with resampled parameters @@ -448,10 +444,10 @@ def test_conversion_preserves_log_scaling(): ) neps_space = convert_classic_to_neps_search_space(classic_space) - # Access the Float parameter and check if it has a _log attribute + # Access the Float parameter and check if it has a log attribute log_param_neps = neps_space.get_attrs()["log_param"] - assert hasattr(log_param_neps, "_log") - assert log_param_neps._log is True + assert hasattr(log_param_neps, "log") + assert log_param_neps.log is True # Round-trip conversion should now preserve log scaling converted_back = convert_neps_to_classic_search_space(neps_space) diff --git a/tests/test_runtime/test_trajectory_and_metrics.py b/tests/test_runtime/test_trajectory_and_metrics.py index b50cbd15d..d77b99edb 100644 --- 
a/tests/test_runtime/test_trajectory_and_metrics.py +++ b/tests/test_runtime/test_trajectory_and_metrics.py @@ -21,16 +21,16 @@ class SimpleSpace(PipelineSpace): """Simple space for testing metrics functionality.""" - x = Float(min_value=0.0, max_value=1.0) - y = Integer(min_value=1, max_value=10) + x = Float(lower=0.0, upper=1.0) + y = Integer(lower=1, upper=10) class SpaceWithFidelity(PipelineSpace): """Space with fidelity for testing multi-fidelity metrics.""" - x = Float(min_value=0.0, max_value=1.0) - y = Integer(min_value=1, max_value=10) - epochs = Fidelity(Integer(min_value=1, max_value=50)) + x = Float(lower=0.0, upper=1.0) + y = Integer(lower=1, upper=10) + epochs = Fidelity(Integer(lower=1, upper=50)) def simple_evaluation(x: float, y: int) -> dict: From 9271369cc6972f9b40a2dfb025606f82c9a0302e Mon Sep 17 00:00:00 2001 From: Nastaran Alipour Date: Mon, 3 Nov 2025 00:17:38 +0100 Subject: [PATCH 103/156] fix report loading --- neps/utils/trial_io.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/neps/utils/trial_io.py b/neps/utils/trial_io.py index b1e823a10..6e0397a1b 100644 --- a/neps/utils/trial_io.py +++ b/neps/utils/trial_io.py @@ -3,8 +3,9 @@ from __future__ import annotations from collections.abc import Mapping, Sequence, ValuesView +from dataclasses import asdict from pathlib import Path -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, cast from neps.state.neps_state import TrialRepo from neps.state.pipeline_eval import UserResultDict @@ -37,7 +38,7 @@ def load_trials_from_pickle( ) return [ - (trial.config, UserResultDict(**trial.report.__annotations__)) + (trial.config, cast(UserResultDict, asdict(trial.report))) for trial in trials if trial.report is not None ] From d1d7ff83726fcb579c1398b8c68f08a2d6955efb Mon Sep 17 00:00:00 2001 From: Nastaran Alipour Date: Mon, 3 Nov 2025 00:50:14 +0100 Subject: [PATCH 104/156] update ask_and_tell example --- 
neps_examples/experimental/ask_and_tell_example.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/neps_examples/experimental/ask_and_tell_example.py b/neps_examples/experimental/ask_and_tell_example.py index 981809c1a..ce49c0d93 100644 --- a/neps_examples/experimental/ask_and_tell_example.py +++ b/neps_examples/experimental/ask_and_tell_example.py @@ -96,10 +96,11 @@ def train_worker(trial_file): json.dump({"loss": loss}, f) def main(parallel: int, results_dir: Path): - space = neps.SearchSpace( - {"a": neps.Integer(1, 13, is_fidelity=True), "b": neps.Float(1, 5)} - ) - opt = neps.algorithms.successive_halving(space, eta=3) + class MySpace(neps.PipelineSpace): + a = neps.Fidelity(neps.Integer(1, 13)) + b = neps.Float(1, 5) + space = MySpace() + opt = neps.algorithms.neps_hyperband(space, eta=3) ask_tell = AskAndTell(opt) results_dir.mkdir(exist_ok=True, parents=True) From 509dc50962d285cec55775ff6d6aaff542f2471e Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 3 Nov 2025 10:33:33 +0100 Subject: [PATCH 105/156] Remove obsolete Jupyter notebook for prior tests and update test handling for core examples - Deleted the `priors_test.ipynb` file as it is no longer needed. - Enhanced the test for core examples to handle known recursion issues in the `pytorch_nn_example.py` file. - Added specific exception handling for `RecursionError` and `WorkerFailedToGetPendingTrialsError` to allow for expected failures instead of skipping tests. 
--- .../experimental/ask_and_tell_example.py | 20 +- .../test_files/algo_comparisons.ipynb | 161 -- neps_examples/test_files/algo_tests.ipynb | 1403 ----------------- neps_examples/test_files/priors_test.ipynb | 400 ----- tests/test_examples.py | 26 +- 5 files changed, 35 insertions(+), 1975 deletions(-) delete mode 100644 neps_examples/test_files/algo_comparisons.ipynb delete mode 100644 neps_examples/test_files/algo_tests.ipynb delete mode 100644 neps_examples/test_files/priors_test.ipynb diff --git a/neps_examples/experimental/ask_and_tell_example.py b/neps_examples/experimental/ask_and_tell_example.py index ce49c0d93..d8f9dd325 100644 --- a/neps_examples/experimental/ask_and_tell_example.py +++ b/neps_examples/experimental/ask_and_tell_example.py @@ -1,24 +1,24 @@ """ # AskAndTell Example: Custom Trial Execution with NePS -This script demonstrates how to use the `AskAndTell` interface from NePS to implement a custom trial execution workflow. -The `AskAndTell` interface provides full control over the evaluation loop, allowing you to manage how trials are executed +This script demonstrates how to use the `AskAndTell` interface from NePS to implement a custom trial execution workflow. +The `AskAndTell` interface provides full control over the evaluation loop, allowing you to manage how trials are executed and results are reported back to the optimizer. This is particularly useful when you need to handle trial execution manually. ## Aim of This File -The goal of this script is to run a **successive halving** optimization process with 3 rungs. The first rung will evaluate -9 trials in parallel. The trials are managed manually using the `AskAndTell` interface, and the SLURM scheduler is used -to execute the trials. This setup demonstrates how to efficiently manage parallel trial execution and integrate NePS +The goal of this script is to run a **successive halving** optimization process with 3 rungs. The first rung will evaluate +9 trials in parallel. 
The trials are managed manually using the `AskAndTell` interface, and the SLURM scheduler is used +to execute the trials. This setup demonstrates how to efficiently manage parallel trial execution and integrate NePS with external job schedulers. ## How to Use This Script 1. **Define the Search Space**: - The search space is defined using `neps.SearchSpace`. + The search space is defined using `neps.PipelineSpace`. 2. **Initialize the Optimizer**: - We use the `successive_halving` algorithm from NePS to optimize the search space. The optimizer is wrapped with + We use the `hyperband` algorithm from NePS to optimize the search space. The optimizer is wrapped with the `AskAndTell` interface to enable manual control of the evaluation loop. 3. **Submit Jobs**: @@ -26,7 +26,7 @@ - The `get_job_script` function generates a SLURM job script that executes the `train_worker` function for a given trial. 4. **Train Worker**: - - The `train_worker` function reads the trial configuration, evaluates a dummy objective function, and writes the + - The `train_worker` function reads the trial configuration, evaluates a dummy objective function, and writes the results to a JSON file. 5. 
**Main Loop**: @@ -136,11 +136,11 @@ class MySpace(neps.PipelineSpace): if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( - "--parallel", type=int, default=9, + "--parallel", type=int, default=9, help="Number of trials to evaluate in parallel initially" ) parser.add_argument( - "--results-dir", type=Path, default=Path("results"), + "--results-dir", type=Path, default=Path("results"), help="Path to save the results inside" ) args = parser.parse_args() diff --git a/neps_examples/test_files/algo_comparisons.ipynb b/neps_examples/test_files/algo_comparisons.ipynb deleted file mode 100644 index 99234baaf..000000000 --- a/neps_examples/test_files/algo_comparisons.ipynb +++ /dev/null @@ -1,161 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "f261ba3d", - "metadata": {}, - "source": [ - "## The new, NePS-based algorithms should perform at least as well as the old ones." - ] - }, - { - "cell_type": "markdown", - "id": "dfe93097", - "metadata": {}, - "source": [ - "Test on the Hartmann6 function with (and without) fidelity" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e5f2a155", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-0.3703557067629127\n", - "-1.7570641798509674\n", - "-3.322368011415477\n" - ] - } - ], - "source": [ - "import neps\n", - "from neps import algorithms\n", - "import matplotlib.pyplot as plt\n", - "from pprint import pprint\n", - "import numpy as np\n", - "\n", - "# Parameters for the Hartmann 6D function\n", - "alpha = np.array([1.0, 1.2, 3.0, 3.2])\n", - "A = np.array([\n", - " [10.0, 3.0, 17.0, 3.5, 1.7, 8.0],\n", - " [0.05, 10.0, 17.0, 0.1, 8.0, 14.0],\n", - " [3.0, 3.5, 1.7, 10.0, 17.0, 8.0],\n", - " [17.0, 8.0, 0.05, 10.0, 0.1, 14.0]\n", - "])\n", - "P = 1e-4 * np.array([\n", - " [1312, 1696, 5569, 124, 8283, 5886],\n", - " [2329, 4135, 8307, 3736, 1004, 9991],\n", - " [2348, 1451, 3522, 2883, 3047, 6650],\n", - " [4047, 
8828, 8732, 5743, 1091, 381]\n", - "])\n", - "# P should be divided by 10000 to match the common constants (1e-4 factor)\n", - "\n", - "def hartmann6(x1, x2, x3, x4, x5, x6):\n", - " \"\"\"x must be a 6-dimensional numpy array or list-like.\"\"\"\n", - " x = np.array([x1, x2, x3, x4, x5, x6])\n", - " r = A * (x - P)**2\n", - " return -np.sum(alpha * np.exp(-np.sum(r, axis=1)))\n", - "\n", - "def mf_hartmann6(x1, x2, x3, x4, x5, x6, fidelity=10):\n", - " \"\"\"Multi-fidelity Hartmann 6D function.\n", - " \n", - " fidelity: float in (1, 10), where 1 is the lowest fidelity and 10 is the highest.\n", - " The function value is scaled by (fidelity / 10) and noise is added inversely proportional to fidelity.\n", - " \"\"\"\n", - " if fidelity < 1.0 or fidelity > 10.0:\n", - " raise ValueError(\"Fidelity must be in the range [1, 10]\")\n", - " \n", - " base_value = hartmann6(x1, x2, x3, x4, x5, x6)\n", - " noise = np.random.normal(0, (10 - fidelity) / 10 * 0.1) # Noise decreases with higher fidelity\n", - " return {\"objective_to_minimize\" : base_value * (fidelity / 10) + noise,\n", - " \"cost\" : fidelity}\n", - "\n", - "global_optimum = [0.20168952, 0.15001069, 0.47687398, 0.2753324, 0.31165163, 0.65730053]\n", - "\n", - "print(mf_hartmann6(*global_optimum, fidelity=1)) # Should be approximately -3.32237\n", - "print(mf_hartmann6(*global_optimum, fidelity=5)) # Should be approximately -3.32237\n", - "print(mf_hartmann6(*global_optimum, fidelity=10)) # Should be approximately -3.32237\n" - ] - }, - { - "cell_type": "markdown", - "id": "c41bafb7", - "metadata": {}, - "source": [ - "Creating four search spaces: one without fidelity and three with, two of them with either good or bad priors." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f012aa3", - "metadata": {}, - "outputs": [], - "source": [ - "class HartmannSpaceFid(neps.PipelineSpace):\n", - " x1 = neps.Float(0, 1)\n", - " x2 = neps.Float(0, 1)\n", - " x3 = neps.Float(0, 1)\n", - " x4 = neps.Float(0, 1)\n", - " x5 = neps.Float(0, 1)\n", - " x6 = neps.Float(0, 1)\n", - " fidelity = neps.Fidelity(neps.Integer(1, 10))\n", - "\n", - "hartmann_space_fid = HartmannSpaceFid()\n", - "hartmann_space_base = hartmann_space_fid.remove(\"fidelity\")\n", - "\n", - "hartmann_space_fid_good_priors = hartmann_space_base\n", - "hartmann_space_fid_bad_priors = hartmann_space_base\n", - "for n, param in enumerate([\"x1\", \"x2\", \"x3\"]):\n", - " hartmann_space_fid_good_priors = hartmann_space_fid_good_priors.add_prior(param, global_optimum[n], \"medium\")\n", - " hartmann_space_fid_bad_priors = hartmann_space_fid_bad_priors.add_prior(param, 1, \"medium\")\n", - "\n", - "# print(\"Hartmann 6D space with fidelity:\")\n", - "# print(hartmann_space_fid)\n", - "# print(\"\\nHartmann 6D space without fidelity:\")\n", - "# print(hartmann_space)\n", - "# print(\"\\nHartmann 6D space with fidelity and good priors:\")\n", - "# print(hartmann_space_fid_good_priors)\n", - "# print(\"\\nHartmann 6D space with fidelity and bad priors:\")\n", - "# print(hartmann_space_fid_bad_priors)" - ] - }, - { - "cell_type": "markdown", - "id": "6f48abde", - "metadata": {}, - "source": [ - "We compare the following algorithms:\n", - "- Random Search (with and without priors and fidelity)\n", - "- HyperBand (with and without priors)\n", - "- PriorBand (with and without fidelity)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "neural-pipeline-search (3.13.1)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": 
"python", - "pygments_lexer": "ipython3", - "version": "3.13.1" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/neps_examples/test_files/algo_tests.ipynb b/neps_examples/test_files/algo_tests.ipynb deleted file mode 100644 index d94a7ab39..000000000 --- a/neps_examples/test_files/algo_tests.ipynb +++ /dev/null @@ -1,1403 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "4d423fb2", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "2\n", - "2\n", - "2\n", - "2\n", - "2\n", - "2\n", - "2\n", - "2\n", - "2\n", - "3\n", - "3\n", - "3\n", - "4\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", 
- "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "0\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n", - "2\n", - "2\n", - "2\n", - "2\n", - "2\n", - "2\n", - "2\n", - "2\n", - "2\n", - "3\n", - "3\n", - "3\n", - "4\n", - "1\n", - "1\n", - "1\n", - "1\n", - "1\n" - ] - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAGdCAYAAACyzRGfAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAALGtJREFUeJzt3Ql8VNXd//EfEUhQNgElIkSwLoAIVUSI0qospkgRBW1VVKTUFVGgVo2KaC2G2iqKsrgguIAorWChLqWRRWvCKgouCMpjohBwgwBKgjLP63ue/53/TJgEkkxOSPJ5v17XZGYuMydnxjnfe5Z7a4VCoZABAAB4kuDrhQAAAITwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMCr2naQ2bt3r23atMkaNGhgtWrVquziAACAA6Bzlu7YscNatGhhCQkJVSt8KHi0atWqsosBAADKIDc311q2bFm1wod6PILCN2zYsLKLAwAADkB+fr7rPAja8SoVPoKhFgUPwgcAAFXLgUyZYMIpAADwivABAAC8InwAAACvDro5HwAOvuVzP/74o/3000+VXRQAlaxOnTp2yCGHlPt5CB8AilVYWGibN2+277//vrKLAuAgmUyqZbT169cv1/MQPgAUe8K/jRs3uqMcnTSobt26nPgPqOG9oF999ZV98cUXdvzxx5erB4TwAaDYXg8FEK3bP/TQQyu7OAAOAkcccYT9z//8j+3Zs6dc4YMJpwBKtL/TJAOoOWrFqfeTbxUAAOAV4QMAarCzzz7bRowYUeI+06dPt8aNG9vBUD5Nfh44cKA7A7aOwrdt21Yp5aoKWrdubQ8//LAdjJjzAaDUxi/4xOvrjex9Qqn2v+qqq+yZZ56xjIwMu/3228P3z5071y688EI3ca6yqCFXYxqr0VRjOmfOHLvgggvCtwPBxN+LLrrI/V2JiYlxKc/LL7/slk9GNlgq3/4CiS9Fy6f39a233rJ33nnHmjVrZo0aNarU8qFs6PkAUC0lJSXZX/7yF/vuu++sKps2bZpb7qyVR5MmTbLnnnvO/vznP8ft+Zs0aXJAFwKrLEXL9+mnn1q7du2sQ4cOlpycXKY5CDpnjSZTY/+TzisK4QNAtdSrVy/XOKmXoCRvv/22/eI
Xv7B69eq5lT033XST7dq1K6on4L777rNLL73UDjvsMDv66KNt4sSJ4cfVi3LPPfdYSkqK641Q74SeI1403KG/Q2X79a9/bf3797dVq1YVu796Rm688cbwbfVgqIH++OOPww2K/o7//Oc/+wxr6PfPP//cRo4c6f5N0Yb9jTfecA2/zvHwq1/9yoWi0gzVqOcp8jlVbz//+c9doFI9qxfjkksusR07doT3KVq+Bx980JYsWeKeR7dFAfPKK6+0ww8/3K3M6tOnj61fv36fsvzzn/+09u3bu/cpJyfHvaaCnP6t/qZjjjnG7aPlpKpn3dexY0dbsWJFsX/n/t5//W2nnXaaC1B6Hy+77DLbunVr+PFFixa5v0V1e8opp7jPYY8ePdw+r732mqtvDTHp30Web0d/u95nbao39QKNHj26xF499bb9/ve/dytW9Jx6nffee2+f9+Opp56yNm3auABfUQgfAKolDVPcf//99uijj7rzEsSio2g1oppD8P7779uLL77owkhk4y1//etfrVOnTvbuu++6YZybb77ZFixY4B77xz/+YePHj7fHH3/cNXhqYE8++eQK+Zs++eQTe/PNN61r167F7nPWWWe5Bi2wePFi1zAF9y1fvtwtkzzjjDNiDnHoBFJ/+tOfXLCIDBdq+P72t7+5xlSNvxrvW265pdx/k94D1dn8+fPdpvKOGzcu5r4q39VXX22pqamubLodDLMpICg4ZGVluQb4vPPOc39nZPnVE6aG9YMPPrAjjzzS3a/37swzz3Tvbd++fe2KK65wYeTyyy93Ie9nP/uZu11co76/919lUHhVI6/HtEz1qquu2ud51PA/9thjbjgpNzfXfvOb37j5GjNnzrR//etf9u9//9t9liNpCKp27dq2bNkye+SRR+yhhx5yf19xLr744nCoWblypZ166qnWs2dP+/bbb8P7bNiwwf1NqtvVq1dbRWHOx8KSj4pKdE56PEsCIM40v0NHcmPGjLGpU6fu87h6RQYNGhQ+staJkyZMmOAa8MmTJ4eP/NQ4BXNHTjjhBPvvf//rGpzevXu7RlhHtOpp0dwEHQGffvrpJZZr+/btB3yGSPW4KEjpFPcFBQWu9yM9vfjvHh0RKxzp6F0N04cffuiOiBU+rrvuOvezS5cuMc/doiEOvVZwlB5JjeiUKVNcYywKaAop5aXhD/VMBEMravwzMzNt7NixMcuncuuEd0H51OArdOg9CQLVjBkzXE+RGns1uEH5NWylEBlJIeXaa691v999993ufVf9BP/utttuc2Fny5Yt+9SJ7O/9/93vfhf+/dhjj3Wfry5dutjOnTujPgPqgdHnTIYOHereYwUz/ZugR2vhwoWuPAH9jfocqufkxBNPtDVr1rjbCmhFKVQrpCh8BPOFFCZVR3//+9/tmmuuCfeMPfvss653pCLR8wGgWtPRro4QP/roo30e09GoGj41AsGWlpYWPrtrQI1PJN0Onk+N1A8//OAaCX3pa8KogkJJ1NDqqLLoFosaEz2msqpnQL0faqCLo7kQaqTVg6CJmerKV2DRbdHPYLiiNNToB8FDjjrqqKjhg7LS0EfknI7SPq/eB4WsyN6gpk2busY48j1XYNEQSlGR9zVv3tz9jOy5CO4rrkz7e//Vw9CvXz8XSvR3KtgGoaWkcqi+g+AR3Fe0DN26dYsaxtLnUmEs1nWY9PlR4FHdRH7e9TlXyAlo6Kmig4fQ8wGgWvvlL3/pAoWOJIt2d+vLWEe9seZoqLE4EDr6XLdunZtDoaGYG264wQ3TqJGPXKVR9MRtxx133AE9v46qg33VoGo+hHpDdKQc6znUGOlvVg+HjnAVNNSwqddk7dq1rlu/LMMlRf8WvU5J8wv0NxZ9PHIYpKTnrYjJoJpLEWtyauTrB4/Huq+4MpX0/qsXQZ89beqNUaOu0JGWlrbPZM6irxnvetFnXcEuckguEDk3R/OBfCB8AKj2NIdAwy9qvCNpzFvDEvsLAtn
Z2fvc1kTAyIZNR7fahg0bZm3btnVd4Hr+eAtOaa2j7eLo6PrJJ5904UPDFwoCCiRqFBVCgu79WNRDEI8rGKuhVVDS5N2gQauIOQR6H9TTsHTp0vCwyzfffOMCgSaX+lDc+6/wpbLo86eQIiVNXi0t/c1FP5fFXXNFn8W8vDzXS6TepsrGsAuAak/d6JrbofH2SBo/V0+A5i+oYVSX9SuvvLLPhFPNJ3jggQfckIdWusyePdvNqxAN22g+iXoVPvvsM3v++eddY6Tu63jQCgU1Gps2bXJH05pnoXknkeGnKPV2KFRpYmX37t3D9+noWysvSjq6VcOkCaVffvmlff3112Uut4ZBNHRwxx13uG59TZxUXcWbGlutTNGQh+Y1aHhBk0W1Kkn3V7SS3n/1ninMaaKoHtPcFE0+jRf1oowaNcoFrRdeeMG9TvC5LEpzUjQso3PIaPKqJr7qs3/nnXfGNRAdKMIHgBpBjXbRbmsNR6hBV6jQclvNj9CkQy2XjPSHP/zBfUHrcQ13aFWBus6DLmv1Mqg3Qc+n7vd58+a5sfV4GDJkiOsu1yoUDbecdNJJbrWCjmBLClsql3p7gkmNCh/q0djffA/Vkxomze8oz9i/5p2oIX711VddedQ4akVHRZ0LpXPnzm5uixpY9TjodYsb9oqnkt5/1Z/CicKqemHUA6JJnvGiVTjqAdMEV/W4KHgEE0eL0rCN6kQ9YPpMKcBqWbOWVgfzWnyqFarMU/3FkJ+f79Ysaza41iFX9JkZu+U8Uebnyk6J/SZX9tkdgXjYvXu3m4xW0ev9D3YH2xk/AVGIVLj0ffr0kr4XStN+0/MBAAC8InwAAACvWO0CACXQ/AfgYLMoxpLZqoSeDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAgAqmU1vPnTu3xH10xV1dd+NgKN/HH3/sLteuM1jqLJoofhm26q4iLphX3XGeDwCltzDD7+udk16q3dWQ64JsRRt8nRvhnHPOse+++85dkyO4HVBje+yxx5Z4jYyy2Lx5sx1++OHhBkunpn733XcPmoY9snwyZswYd/E5XbAsuDYMEE+EDwA1nhpZXYtCF+nSRcGuv/56d2G1nj17xuX5k5OT7WBWtHy6Cm3fvn3LdWXewsJCd0VXlKywhtYTwy4AarwjjzzSNcDqkbjpppvcz1WrVsXcV9fi1NVK//73v4fvUw+Grjwb0KXdExMT7fvvv99nWEPPLbpCru4vepVZXfVUz6WroupKpXv27CnVUI0ugBf5nPpdf9Ott97qrjSrv7Po1WUjy6ffV65c6a5uq9+DfdesWWM9evRwl4tX2dQztHPnzn3KMnbsWHdV4BNPPDE8LPHSSy+5qwbr33bp0sVdRXj58uV22mmnuZ6VPn362FdffVXs36meqkGDBrl613Mcf/zx7kq2gdtuu81dpfXQQw91PVejR4+Oqjf9DXqPnn76aXeZe73mDTfc4K7y+8ADD7g60WdAZS9aL5MnT3bl0+vquSPf91jWrl3r9tdr6GqxV1xxhX399ddR78eNN97o3qdmzZqFr45c0xA+ACAiWLz++uuWk5NjXbt2jbmPGiRdljw4vbUaxo8++sj1mmiuhCxevNg1smoMi1q2bJn7qUuva7jj5ZdfDj+2cOFC1+ugn88884y7HLu28tJzaRhl6dKlrrFVsFiwYEHMfVWmk046yf7whz+432+55RbbtWuXayQ1NKPQoEvEq/xqRCNlZma6XiQ99/z586OGce666y4X6GrXrm2XXXaZC0OPPPKIvfXWW7Zhwwa7++67iy2/wsSHH35or732mqtrBQI13IEGDRq4etI+ek5d4n78+PFRz6F61b/X+/vCCy/Y1KlTXe/OF1984d6vv/zlL66MqqOirz1w4EB77733XADSZehVhlg01KeApmC5YsUK91pbtmyx3/zmN/u8H3Xr1rX//ve/NmXKFKuJGHYBUC2p8Ss6X0FHurG
0bNnS/SwoKLC9e/e6xlkBozg6en388cfd70uWLHGNjY6eFUjatm3rfp511lkx/62O3kW9B0WHO9S4P/bYY3bIIYe451HjqAb96quvtvLo2LGjCwCiXgO9hp63d+/e++yrMikgqO6C8qkx16XUn332WRdiRM/Rr18/12jrCF/02FNPPRUeRgiui6MAExzhaz7NpZde6l7/zDPPdPcNHTq0xJClMKg6Vk+JtG7dOupxhYaAHtPrzZo1ywWcgN5X9XwoqLRv397N9VFQevXVVy0hIcH11OhvUfCLDJ4XX3yx/f73v3e/33fffS5YPfroozZp0qR9yqk6UTnvv//+8H16zVatWrneHvXOBO/BAw88YDUZ4QNAtaTGRUfIkXRUe/nll++zr46+1SgpfKhnQkf0GqLQ3I9YFCzUiGqoQEfNCiNB+FBD+s4770Q1fAdKPQ4KHgENv2i4o7wUPiLpebdu3XrA/15H+p06dQoHD1FwUIOuBjwIHyeffHLM+QuRrx+5b+R9JZVH74N6H9Rzcu6557rhnTPOOCP8+IsvvmgTJkxwvRsaCvrxxx/dHJ5ICiV6jyNfU3Wt4FFSOVJTU/e5XdzqFvWOKLzEmqSrsgXho3PnzlbTET4AVEtqKI877rio+9TFHovmYWj1SxAAFFI0/l9c+FDDqXCi4KFN+yp86MhZwxKabxDZOB6oOnXq7DPEowa+OGo4NVQUKdYckdI+b1lFhpPiXl+vHeu+ksqjORSff/6566VQz4MmAms+jObHZGVlueGQe++91/WuNGrUyPV6PPjgg8WWIXjNeNeLgk/QG1RU5Jygw4qpp5qEOR8AUISOiDWHozhqpDSB8pVXXrEPPvjAunfv7o7u1XOi4RgNDxTXwAQ9A8UNAZWGhnA0LyNSRZxzol27du6oXnM/ApqvEAxX+KC/dfDgwfb888/bww8/bE888YS7X71MWpVz5513unrXkIaCSrxkZ2fvc1v1Ecupp57qPg/qZVHwjdwIHNEIHwBqPHW15+XluUZLkymfe+4569+/f4n/RkMtmrioVRTqZldDrHkiM2bMKHa+h2hVhVZOBJMRt2/fXuZya3KjJjZqLsb69evdvA6ttog39SzoHChq/PX8GloYPny4W8kRDKNUJE1GVdDTxFQ17prPEwQAhQ3NCVFvh4Y2NPwyZ86cuL22Pg+at6E5G6rfYFguFvXGfPvtt25Oi3rAVJ433njDhgwZEpewWZ0QPgDUeDp6V7e4jlC1bPPaa691kwpLooChBqXostai9xWlyZxqINVDoiWp+ws5JdEwg1ZjaH6JVtfs2LHDrrzySos3rdpRI6qGVa9z0UUXuaEPTbD0Qb1F6enprndJAU89Uwobcv7559vIkSNdIFAQVE+I6iReNJyj19JrK+QpcGrCaix6P9UjpM+A5qZoeE5LajWkFzm3BGa1QkUHDCtZfn6+G7PT0UDRCUPxMH7BJ1G3u+X8X9ddWWSnxO8MiCUZ2fv/JikBPml1w8aNG918CB31AjWNhtfUi1JZp72vat8LpWm/iWIAAMArwgcAAPCKpbYAAMRwkM1KqFbo+QAAAF4RPgAAgFeEDwAlousZQLy/DwgfAGIKTj0dXBYeAAoLC93PyGsQlQUTTgHEpC8XnRwpuNCWTjQVXJcDQM2zd+9edzFFfRfoZHnlQfgAUKzgkuqluQIqgOorISHBUlJSyn0gQvgAUCx9wei047oeSayrpQKoWerWrRuXU8UTPgAc0BBMecd4ASBQqvhyzz33uCOhyK1t27ZR53zXVf2aNm3qrvI4cOBAd9VGAACAQKn7Tk466STbvHlzeHv77bfDj+nKgvPmzXOXIF68eLFt2rTJBgwYUNqXAAAA1Viph100wzWYhBZJV7GbOnWqzZw503r06OHumzZtmrVr186ys7OtW7du8SkxAACoWT0f69evtxYtWtixxx5rgwYNspycHHf/ypUr3YS0Xr16hffVkIxmxWZlZcW31AAAoGb
0fHTt2tWmT59uJ554ohtyuffee+0Xv/iFrV271vLy8twsWJ0XIFLz5s3dY8UpKChwWyA/P78sfwcAAKiO4aNPnz7h3zt27OjCyDHHHGMvvfSS1atXr0wFyMjIcCEGAADUDOVarKtejhNOOME2bNjg5oHotKvbtm2L2kerXWLNEQmkp6e7+SLBlpubW54iAQCA6hw+du7caZ9++qk7CVHnzp3dtSAyMzPDj69bt87NCUlNTS32ORITE61hw4ZRGwAAqL5KNexyyy23WL9+/dxQi5bRjhkzxp146NJLL7VGjRrZ0KFDbdSoUdakSRMXIoYPH+6CBytdAABAmcLHF1984YLGN998Y0cccYR1797dLaPV7zJ+/Hh32lWdXEyTSNPS0mzSpEmleQkAAFDNlSp8zJo1q8THk5KSbOLEiW4DAACIpfxXhwEAACgFwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAAKpO+Bg3bpzVqlXLRowYEb5v9+7dNmzYMGvatKnVr1/fBg4caFu2bIlHWQEAQE0OH8uXL7fHH3/cOnbsGHX/yJEjbd68eTZ79mxbvHixbdq0yQYMGBCPsgIAgJoaPnbu3GmDBg2yJ5980g4//PDw/du3b7epU6faQw89ZD169LDOnTvbtGnT7J133rHs7Ox4lhsAANSk8KFhlb59+1qvXr2i7l+5cqXt2bMn6v62bdtaSkqKZWVlxXyugoICy8/Pj9oAAED1Vbu0/2DWrFm2atUqN+xSVF5entWtW9caN24cdX/z5s3dY7FkZGTYvffeW9piAACAmtDzkZubazfffLPNmDHDkpKS4lKA9PR0N1wTbHoNAABQfZUqfGhYZevWrXbqqada7dq13aZJpRMmTHC/q4ejsLDQtm3bFvXvtNolOTk55nMmJiZaw4YNozYAAFB9lWrYpWfPnrZmzZqo+4YMGeLmddx2223WqlUrq1OnjmVmZroltrJu3TrLycmx1NTU+JYcAABU//DRoEED69ChQ9R9hx12mDunR3D/0KFDbdSoUdakSRPXizF8+HAXPLp16xbfkgMAgJox4XR/xo8fbwkJCa7nQytZ0tLSbNKkSfF+GQAAUFPDx6JFi6JuayLqxIkT3QYAAFAU13YBAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABeET4AAIBXhA8AAHDwho/Jkydbx44drWHDhm5LTU211157Lfz47t27bdiwYda0aVOrX7++DRw40LZs2VIR5QYAADUhfLRs2dLGjRtnK1eutBUrVliPHj2sf//+9sEHH7jHR44cafPmzbPZs2fb4sW
LbdOmTTZgwICKKjsAAKiCapdm5379+kXdHjt2rOsNyc7OdsFk6tSpNnPmTBdKZNq0adauXTv3eLdu3eJbcgAAULPmfPz00082a9Ys27Vrlxt+UW/Inj17rFevXuF92rZtaykpKZaVlVXs8xQUFFh+fn7UBgAAqq9Sh481a9a4+RyJiYl23XXX2Zw5c6x9+/aWl5dndevWtcaNG0ft37x5c/dYcTIyMqxRo0bhrVWrVmX7SwAAQPUMHyeeeKKtXr3ali5datdff70NHjzYPvzwwzIXID093bZv3x7ecnNzy/xcAACgms35EPVuHHfcce73zp072/Lly+2RRx6x3/72t1ZYWGjbtm2L6v3Qapfk5ORin089KNoAAEDNUO7zfOzdu9fN21AQqVOnjmVmZoYfW7duneXk5Lg5IQAAAKXu+dAQSZ8+fdwk0h07driVLYsWLbI33njDzdcYOnSojRo1ypo0aeLOAzJ8+HAXPFjpAgAAyhQ+tm7daldeeaVt3rzZhQ2dcEzBo3fv3u7x8ePHW0JCgju5mHpD0tLSbNKkSaV5CQAAUM2VKnzoPB4lSUpKsokTJ7oNAAAgFq7tAgAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADg4A0fGRkZ1qVLF2vQoIEdeeSRdsEFF9i6deui9tm9e7cNGzbMmjZtavXr17eBAwfali1b4l1uAABQE8LH4sWLXbDIzs62BQsW2J49e+zcc8+1Xbt2hfcZOXKkzZs3z2bPnu3237Rpkw0YMKAiyg4AAKqg2qXZ+fXXX4+6PX36dNcDsnLlSvvlL39p27dvt6lTp9rMmTOtR48ebp9p06ZZu3btXGDp1q1bfEsPAABq1pwPhQ1p0qSJ+6kQot6QXr16hfdp27atpaSkWFZWVsznKCgosPz8/KgNAABUX2UOH3v37rURI0bYmWeeaR06dHD35eXlWd26da1x48ZR+zZv3tw9Vtw8kkaNGoW3Vq1albVIAACgOocPzf1Yu3atzZo1q1wFSE9Pdz0owZabm1uu5wMAANVozkfgxhtvtPnz59uSJUusZcuW4fuTk5OtsLDQtm3bFtX7odUueiyWxMREtwEAgJqhVD0foVDIBY85c+bYm2++aW3atIl6vHPnzlanTh3LzMwM36eluDk5OZaamhq/UgMAgJrR86GhFq1keeWVV9y5PoJ5HJqrUa9ePfdz6NChNmrUKDcJtWHDhjZ8+HAXPFjpAgAASh0+Jk+e7H6effbZUfdrOe1VV13lfh8/frwlJCS4k4tpJUtaWppNmjSJ2gYAAKUPHxp22Z+kpCSbOHGi2wAAAIri2i4AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAAPCK8AEAALwifAAAAK8IHwAAwCvCBwAA8Ir
wAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8InwAAACvCB8AAMArwgcAADi4w8eSJUusX79+1qJFC6tVq5bNnTs36vFQKGR33323HXXUUVavXj3r1auXrV+/Pp5lBgAANSl87Nq1yzp16mQTJ06M+fgDDzxgEyZMsClTptjSpUvtsMMOs7S0NNu9e3c8ygsAAKq42qX9B3369HFbLOr1ePjhh+2uu+6y/v37u/ueffZZa968ueshueSSS8pfYgAAUKXFdc7Hxo0bLS8vzw21BBo1amRdu3a1rKyseL4UAACoKT0fJVHwEPV0RNLt4LGiCgoK3BbIz8+PZ5EAAMBBptJXu2RkZLjekWBr1apVZRcJAABUlfCRnJzsfm7ZsiXqft0OHisqPT3dtm/fHt5yc3PjWSQAAFCdw0ebNm1cyMjMzIwaRtGql9TU1Jj/JjEx0Ro2bBi1AQCA6qvUcz527txpGzZsiJpkunr1amvSpImlpKTYiBEj7M9//rMdf/zxLoyMHj3anRPkggsuiHfZAQBATQgfK1assHPOOSd8e9SoUe7n4MGDbfr06Xbrrbe6c4Fcc801tm3bNuvevbu9/vrrlpSUFN+SAwCAmhE+zj77bHc+j+LorKd/+tOf3AYAAHDQrXYBAAA1C+EDAAB4RfgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV7X9vhzKYvyCT6yqGdn7hMouAgDgIEXPBwAA8IrwAQAAvCJ8AAAArwgfAADAK8IHAADwivABAAC8YqltFdMt54ky/9vslGviWhYAAMqCng8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAADgFeEDAAB4RfgAAABecW0XINLCjLL9u3PS410SAKi26PkAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF6x2gX4f8Yv+MS65XxTpn+b/eMnVhlG9j6hUl4XQPR3R1UzspK/O+j5AAAA1SN8TJw40Vq3bm1JSUnWtWtXW7ZsWUW9FAAAqOnh48UXX7RRo0bZmDFjbNWqVdapUydLS0uzrVu3VsTLAQCAmh4+HnroIbv66qttyJAh1r59e5syZYodeuih9vTTT1fEywEAgJo84bSwsNBWrlxp6en//3TTCQkJ1qtXL8vKytpn/4KCArcFtm/f7n7m5+dbRdi9a2fU7V0/FMTtuXyoKuWtqPevIql+ylq/lfFZqKr1DFQ3lfX//8H23RE8ZygU2v/OoTj78ssv9aqhd955J+r+P/7xj6HTTz99n/3HjBnj9mdjY2NjY2OzKr/l5ubuNytU+lJb9ZBofkhg79699u2331rTpk2tVq1a5UpgrVq1stzcXGvYsGGcSotYqGu/qG9/qGt/qOuqX9fq8dixY4e1aNFiv/vGPXw0a9bMDjnkENuyZUvU/bqdnJy8z/6JiYlui9S4ceO4lUcVywfZD+raL+rbH+raH+q6atd1o0aNKmfCad26da1z586WmZkZ1Zuh26mpqfF+OQAAUMVUyLCLhlEGDx5sp512mp1++un28MMP265du9zqFwAAULNVSPj47W9/a1999ZXdfffdlpeXZz//+c/t9ddft+bNm5svGsrReUaKDukg/qhrv6hvf6hrf6jrmlXXtTTrtNJeHQAA1Dhc2wUAAHhF+AAAAF4RPgAAgFeEDwAA4FW1DR8TJ0601q1bW1JSknXt2tWWLVtW2UWq8jIyMqxLly7WoEEDO/LII+2CCy6wdevWRe2ze/duGzZsmDtDbf369W3gwIH7nHAOpTNu3Dh3tt8RI0aE76Oe4+vLL7+0yy+/3NVnvXr17OSTT7YVK1aEH9e8fK3eO+qoo9z
julbV+vXrK7XMVdFPP/1ko0ePtjZt2rh6/NnPfmb33Xdf1LVAqOuyWbJkifXr18+dXVTfF3Pnzo16/EDqVWcXHzRokDvxmE72OXToUNu5s4KuWxOqhmbNmhWqW7du6Omnnw598MEHoauvvjrUuHHj0JYtWyq7aFVaWlpaaNq0aaG1a9eGVq9eHTrvvPNCKSkpoZ07d4b3ue6660KtWrUKZWZmhlasWBHq1q1b6IwzzqjUcldly5YtC7Vu3TrUsWPH0M033xy+n3qOn2+//TZ0zDHHhK666qrQ0qVLQ5999lnojTfeCG3YsCG8z7hx40KNGjUKzZ07N/Tee++Fzj///FCbNm1CP/zwQ6WWvaoZO3ZsqGnTpqH58+eHNm7cGJo9e3aofv36oUceeSS8D3VdNq+++mrozjvvDL388svu+ipz5syJevxA6vVXv/pVqFOnTqHs7OzQW2+9FTruuONCl156aagiVMvwoQvYDRs2LHz7p59+CrVo0SKUkZFRqeWqbrZu3eo+5IsXL3a3t23bFqpTp477Qgl89NFHbp+srKxKLGnVtGPHjtDxxx8fWrBgQeiss84Khw/qOb5uu+22UPfu3Yt9fO/evaHk5OTQX//61/B9eg8SExNDL7zwgqdSVg99+/YN/e53v4u6b8CAAaFBgwa536nr+CgaPg6kXj/88EP375YvXx7e57XXXgvVqlXLXTA23qrdsEthYaGtXLnSdSkFEhIS3O2srKxKLVt1s337dvezSZMm7qfqfc+ePVF137ZtW0tJSaHuy0DDKn379o2qT6Ge4+uf//ynOxvzxRdf7IYTTznlFHvyySfDj2/cuNGdLDGyvnX9Cg3nUt+lc8YZZ7hLbXzyySfu9nvvvWdvv/229enTx92mrivGgdSrfmqoRf8vBLS/2s+lS5fGvUyVflXbePv666/duGLRs6nq9scff1xp5apudL0ezUE488wzrUOHDu4+fbh1bZ+iFwZU3esxHLhZs2bZqlWrbPny5fs8Rj3H12effWaTJ092l4W44447XJ3fdNNNro51mYigTmN9p1DfpXP77be7K6oqLOsCpPquHjt2rJtnINR1xTiQetVPhe9ItWvXdgeXFVH31S58wN9R+dq1a91RC+JLl7m++eabbcGCBW7CNCo+SOto7/7773e31fOhz/aUKVNc+ED8vPTSSzZjxgybOXOmnXTSSbZ69Wp3EKNJktR1zVLthl2aNWvmEnXRmf+6nZycXGnlqk5uvPFGmz9/vi1cuNBatmwZvl/1q2Gvbdu2Re1P3ZeOhlW2bt1qp556qjvy0LZ48WKbMGGC+11HK9Rz/Gj2f/v27aPua9euneXk5LjfgzrlO6X8/vjHP7rej0suucStKLriiits5MiRbiWdUNcV40DqVT/1vRPpxx9/dCtgKqLuq134UFdp586d3bhi5JGNbqemplZq2ao6zWNS8JgzZ469+eabbrlcJNV7nTp1oupeS3H1JU7dH7iePXvamjVr3FFhsOnIXF3Twe/Uc/xo6LDoknHNSTjmmGPc7/qc68s3sr41dKBxcOq7dL7//ns3hyCSDhb1HS3UdcU4kHrVTx3Q6OAnoO95vTeaGxJ3oWq61FazeKdPn+5m8F5zzTVuqW1eXl5lF61Ku/76691SrUWLFoU2b94c3r7//vuoJaBafvvmm2+6JaCpqaluQ/lErnYR6jm+y5lr167tloGuX78+NGPGjNChhx4aev7556OWKeo75JVXXgm9//77of79+7P8swwGDx4cOvroo8NLbbUstFmzZqFbb701vA91XfbVce+++67b1LQ/9NBD7vfPP//8gOtVS21POeUUt+T87bffdqvtWGpbSo8++qj7ctb5PrT0VuuWUT76QMfadO6PgD7IN9xwQ+jwww93X+AXXnihCyiIb/ignuNr3rx5oQ4dOriDlrZt24aeeOKJqMe1VHH06NGh5s2bu3169uwZWrduXaWVt6rKz893n2N9NyclJYW
OPfZYd26KgoKC8D7UddksXLgw5vezAt+B1us333zjwobOvdKwYcPQkCFDXKipCLX0n/j3pwAAANSQOR8AAODgRvgAAABeET4AAIBXhA8AAOAV4QMAAHhF+AAAAF4RPgAAgFeEDwAA4BXhAwAAeEX4AAAAXhE+AACAV4QPAABgPv0v6qomIQJ+6yEAAAAASUVORK5CYII=", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "import neps\n", - "from neps import algorithms\n", - "from functools import partial\n", - "import matplotlib.pyplot as plt\n", - "global_values = []\n", - "eta=3\n", - "for algo in [partial(algorithms.neps_hyperband, eta=eta), \n", - " partial(algorithms.hyperband, eta=eta),]:\n", - " # partial(algorithms.neps_hyperband, sampler=\"prior\", eta=eta)]: \n", - " # partial(algorithms.hyperband, sampler=\"prior\", eta=eta)]:\n", - " neps.run(\n", - " evaluate_pipeline,\n", - " SimpleSpace(),\n", - " root_directory=\"neps_test_runs/algo_tests3\",\n", - " overwrite_root_directory=True,\n", - " optimizer=algo,\n", - " fidelities_to_spend=473\n", - " )\n" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "73b9e3d4", - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAALaNJREFUeJzt3Q2czXXe//HPCEPuby4mi6jsIrohMbi2LVrJ1bLZWq1aydIWFbZVtuhqS0pCSpQV2kjZjeIqXXaIbOO+bCJ05YqtjNpi3GQov//j/X1cv/M/Z+6McY75npnX8/E4zZwb53zne07n+/59b37flCAIAgMAAPBIuZIuAAAAQG4EFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAd8pbEjp+/Lh9/vnnVq1aNUtJSSnp4gAAgCLQuWEPHDhgDRo0sHLlypW+gKJw0qhRo5IuBgAAKIbdu3dbw4YNS19AUc9J+AdWr169pIsDAACKIDs723UwhO14qQso4bCOwgkBBQCA5FKU6RlMkgUAAN4hoAAAAO8QUAAAgHeScg4KAP+WDn733Xf2/fffl3RRAJSgM844w8qXLx+XU4AQUACckqNHj9oXX3xhhw8fLumiAPDAmWeeaWeddZZVrFjxlJ6HgALglE6auHPnTnfUpBMv6QuJkycCZbcn9ejRo/bll1+674VmzZqd8GRshSGgACg2fRkppOi8BjpqAlC2Va5c2SpUqGCffvqp+36oVKlSsZ+LSbIATtmpHCUBKF3Kxen7gG8VAADgHQIKAKBQP/nJT2zo0KGFPmbWrFlWs2ZN86F8mrDdu3dvd6ZxzYnat29fiZQrGTRp0sQmTZpkPmIOCoCEmLh0+2l7rWFX/vCk/83NN99ss2fPtrFjx9q9994buX3hwoX285//3E34Kylq7NXg5tewqsFdsGCB9erVK3I9FE5W/sUvfuH+rtTU1LiU59VXX3XzCqIbNZXvRKHldMldPr2v77zzjr377rtWt25dq1GjRomWD8VDDwqAMksT+B577DH75ptvLJnNnDnTLfXWyolnnnnG/vznP9vDDz8ct+evXbt2kTZ3Kym5y/c///M/
1qJFC2vVqpWlpaUVa2WZzumjCeAonCbCJgoBBUCZ1bVrV9eAqbehMKtWrbJ///d/dysUtGLpzjvvtEOHDsX0KDz00EN2ww03WJUqVewHP/iBTZkyJXK/emP+8z//0xo3bux6NdTLoeeIFw2t6O9Q2f7jP/7DevbsaRs3bizw8ephGTJkSOS6ekLUiH/00UeRRkd/x9/+9rc8Qyj6XSs0hg0b5v5N7sb/rbfecuGgatWqdtVVV7ngdDLDQurBin5O1dtFF13kQpfqWb0hffr0sQMHDkQek7t8TzzxhK1cudI9j66LQuivf/1rq1Wrlltx1r17d9uxY0eesrz++uvWsmVL9z7t2rXLvabCnv6t/qazzz7bPUZLaVXPuu2CCy6w9evXF/h3nuj91992ySWXuJCl9/FXv/qV7d27N3L/22+/7f4W1e3FF1/sPodXXHGFe8ybb77p6lvDWfp30ecj0t+u91kX1Zt6k0aNGlVo76B67X7zm9/Yv/3bv7nn1Ots2rQpz/vxpz/9yZo2bXpKq3ROhIACoMzSkMgjjzxiTz31lP3zn//M9zE6GldDqzkN//jHP+zll192gSW6gZfHH3/cLrzwQnvvvffckNFdd91lS5cudff99a9/tYkTJ9qzzz7rGkU1wq1bt07I37R9+3ZbtmyZtW/fvsDHXHbZZa7RC61YscI1XuFt69ats2PHjlnHjh3zHU5p2LCh/fGPf3ThIzqAqHEcP368a3AVENTA33333af8N+k9UJ0tXrzYXVTeRx99NN/HqnwDBw609PR0VzZdD4f0FCIULjIzM10jffXVV7u/M7r86lFT4/vhhx9avXr13O167zp16uTe2x49ethNN93kAsuNN97oguC5557rrhfU8J/o/VcZFHAVBHTf//7v/7ry5qZw8PTTT7uhq927d9v111/v5o/MnTvX/uu//sv++7//232Wo2m4S2d2Xbt2rT355JM2YcIE9/cV5LrrrosEnw0bNlibNm2sS5cu9vXXX0ce8/HHH7u/SXX7/vvvW6IwByUfmTNO/X+o0y19wPiSLgKQlDTfREeEDzzwgM2YMSPP/epd6du3b+QIXSefmjx5smvkp06dGjmCVAMWzmX54Q9/aH//+99do3TllVe6hlpHxuqx0VwJHUlfeumlhZZr//797ui8KNRzo7Cl7QZycnJcL8rIkSMLfLyOrBWg1AugxmvLli3uyFoB5be//a372a5du3zPbaPhFL1WeLQfTQ3ttGnTXIMtCnEKMqdKQy3q4QiHcRQQMjIybMyYMfmWT+XWSQPD8ikUKJjoPQlD15w5c1yPkwKBGuWw/BoiU9CMpiBz6623ut9Hjx7t3nfVT/jv7rnnHheIsrKy8tSJnOj9v+WWWyK/n3POOe7z1a5dOzt48GDMZ0A9OfqcyYABA9x7rPCmfxP2jC1fvtyVJ6S/UZ9D9cD86Ec/sg8++MBdV4jLTcFbQUYBJZy/pMCpOvrLX/5igwYNivSwvfDCC66XJZHoQQFQ5umoWUeaW7duzXOfjmrVOKqhCC/dunWLnEU3pAYqmq6Hz6eG7Ntvv3UNiRoGTXJVmCiMGmMdnea+5EcNju5TWdXDoF4UNeIF0dwMNeTqidBkUg0bKNTouuhnODRyMhQMwnAiOt159FBFcWmYJXqOyck+r94HBbHoXqU6deq4Bjv6PVeo0XBNbtG31a9f3/2M7gEJbyuoTCd6/9VTcc0117jgor9T4TcMNoWVQ/UdhpPwttxl6NChQ8yQmT6XCmz57Zulz49Ckeom+vOuz7mCUEjDXIkOJ0IPCoAy78c//rELHToizd21ri9sHT3nN2dEDUpR6Ch227Ztbk6Hhn1uv/12NySkIBC9+iT3ya7OO++8Ij2/js7Dx6rR1fwM9aroiDu/51CDpb9ZPSU6UlYYUeOn3pfNmze7IYTiDM3k/lv0OoXNd9DfmPv+6CGXwp43ERNYNbcjvwm10a8f3p/fbQWVqbD3X70R+uzp
ol4dNfwKJt26dcszATX3a8a7XvRZV/iLHv4LRc8V0vyk04GAAgBmbk6DhnrUwEfTGLyGQE4UFlavXp3nuiYvRjd+OkrWZfDgwda8eXPX3a7njzcNwYiO2guio/Tp06e7gKKhEoUFhRY1nAoq4VBCftTTEI+dq9UYK0xpwnHY6CViToPeB/VYrFmzJjLE869//cuFBk2IPR0Kev8V0FQWff4UZKSwCbcnS39z7s+lhinDz0g0fRb37NnjepvUa1XSGOIBgP/rstdcE43/R9N4vnoUNJ9Cjae6x1977bU8k2Q1v2HcuHFueEUreObPn+/meYiGiDS/Rb0Tn3zyib344ouuwVJXeTxo5YUals8//9wdlWveh+bBRAek3NRrouClyaCdO3eO3KajeK0oKewoWY2XJsF+9tln9tVXXxW73Bpy0TDFH/7wBzeEoMmeqqt4U4OsFTcaXtE8Cw1laIKrVlvp9kQr7P1XL5wCnya36j7NldGE2XhRb8zw4cNdGHvppZfc64Sfy9w0R0ZDQDrHjibcarKuPvv33XdfXENTURFQAOD/qGHP3UWuoQ81+goeWmqs+RqaKKmlotF+97vfuS9x3a+hFa2WUDd92D2u3gr1Suj51NW/aNEiN9YfD/3793dd81pdo6Gd888/363C0JFwYYFM5VKvUTgRUwFFPSMnmn+ielLjpfkmpzIXQfNg1Fi/8cYbrjxqQLVSJVHnimnbtq2ba6NGWD0Xet2ChtjiqbD3X/WnAKNAq94c9aRoYmq8aHWRetI0KVc9Nwon4WTX3DREpDpRT5o+Uwq5WtKtZeXhPJvTKSUoydMlFlN2drZb061Z7lqnHW+s4gGK5siRI24CXaLPh+A7386sCoiCpgLo6T6VfWHfCyfTftODAgAAvENAAQAA3mEVDwCcIs3HAHzzdj7LhZMJPSgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoACAB3Sa8YULFxb6GO20rH1SfCjfRx99ZB06dHBnCtXZSlHwEnTVXSI2QSztOA8KgMRYPvb0vdblI0/6n6ix1yZ7uUOBzh1x+eWX2zfffOP2UAmvh9Qgn3POOYXuaVIcX3zxhdWqVSvSqOk04e+99543jX90+eSBBx5wGwpqE7pwLx8gnggoAFAEaoi1d4g2XtNGb7fddpvbLK9Lly5xef60tDTzWe7yaffhHj16nNKOzEePHnU7+aJwR8toPTHEAwBFUK9ePddIq2fjzjvvdD83btyY72O1B6t2qf3LX/4SuU09IdpxOLRq1SpLTU21w4cP5xlC0XOLdkbW7bl3F9Zut3ou7YarHWqPHTt2UsNC2tQw+jn1u/6mESNGuB2G9Xfm3lU4unz6fcOGDW5XY/0ePvaDDz6wK664wipXruzKph6mgwcP5inLmDFj3G7QP/rRjyJDIK+88orbLVr/tl27dm736HXr1tkll1ziemi6d+9uX375ZYF/p3q8+vbt6+pdz9GsWTO3g3Honnvucbvznnnmma4HbNSoUTH1pr9B79Hzzz9vjRs3dq95++23u92dx40b5+pEnwGVPXe9TJ061ZVPr6vnjn7f87N582b3eL2Gdgm+6aab7Kuvvop5P4YMGeLep7p160Z2xS5rCCgAcBIUPpYsWWK7du2y9u3b5/sYNVrasj481bgaz61bt7reF83dkBUrVriGWA1mbmvXrnU///a3v7mhlVdffTVy3/Lly13vhX7Onj3bZs2a5S6nSs+lIZs1a9a4BlnhY+nSpfk+VmU6//zz7Xe/+537/e6777ZDhw65hlTDQAoW8+fPd+VXQxstIyPD9UbpuRcvXhwzZHT//fe70Fe+fHn71a9+5QLTk08+ae+88459/PHHNnr06ALLr8CxZcsWe/PNN11dKzSocQ9Vq1bN1ZMeo+ecPn26TZw4MeY5VK/693p/X3rpJZsxY4brJfrnP//p3q/HHnvMlVF1lPu1e/fubZs2bXIhqU+fPq4M
+dGwokKcwuf69evda2VlZdn111+f5/2oWLGi/f3vf7dp06ZZWcQQD4AySw1k7vkTOmLOT8OGDd3PnJwcO378uGvAFUIKoqPgZ5991v2+cuVK1yDpKFyhpXnz5u7nZZddlu+/VS+AqBci99CKAsDTTz9tZ5xxhnseNaBq9AcOHGin4oILLnAhQdT7oNfQ81555ZV5HqsyKUSo7sLyqcE/cuSIvfDCCy7oiJ7jmmuucQ27egpE9/3pT3+KDFmE+xgp5IQ9BZrfc8MNN7jX79Spk7ttwIABhQYxBUbVsXpcpEmTJjH3K1iEdJ9eb968eS4EhfS+qgdFYaZly5Zu7pHC1BtvvGHlypVzPT76WxQOo8PpddddZ7/5zW/c7w899JALX0899ZQ988wzecqpOlE5H3nkkchtes1GjRq5XiP18oTvwbhx46wsI6AAKLPUAOlIO5qOjm+88cY8j9VRvBouBRT1cKhnQMMhmouSH4UPNbQaltDRtwJLGFDU2L777rsxjWNRqedC4SSkoR4NrZwqBZRoet69e/cW+d+rx+DCCy+MhBNRuFCjr0Y+DCitW7fOdz5F9OtHPzb6tsLKo/dBvRjqgfnpT3/qhpI6duwYuf/ll1+2yZMnu14SDTt99913bk5RNAUXvcfRr6m6VjgprBzp6el5rhe0ake9LAo4+U0sVtnCgNK2bVsr6wgoAMosNabnnXdezG3qzs+P5oVoVU8YEhRkNB+hoICixlUBRuFEFz1WAUVH4BoC0fyH6Aa0qCpUqJBnOEkhoCBqXDUsFS2/OSsn+7zFFR1gCnp9vXZ+txVWHs3p+PTTT11vh3owNHlZ83M0XyczM9MNvTz44IOul6ZGjRqu9+SJJ54osAzha8a7XhSOwl6l3KLnKFUpoJ7KEuagAEAx6Mhac0oKooZMkz5fe+01+/DDD61z586ul0A9MBr60VBEQY1Q2MNQ0HDTydBwkeaJREvEOTlatGjhegc0FyWk+RPh0MjpoL+1X79+9uKLL9qkSZPsueeec7ert0qrje677z5X7xo+UZiJl9WrV+e5rvrIT5s2bdznQb01CsfRF0JJLAIKABSBuvX37NnjGjZNAP3zn/9sPXv2LPTfaFhHky21OkRd+mqsNW9lzpw5Bc4/Ea0W0YqQcALl/v37i11uTcjUZEzNDdmxY4ebZ6JVJPGmHgqdI0YBQc+vYYw77rjDrVAJh2wSSRNoFQY1mVYBQPOLwpCgQKI5Kuo10TCKhnoWLFgQt9fW50HzSDSHRPUbDgHmR706X3/9tZtjo540leett96y/v37xyWQliYEFAAoAvUCqAteR7pasnrrrbe6iZCFUQhRo5N7SW/u23LTBFQ1oupp0XLcEwWhwmhIQ6tMNN9Fq4YOHDhgv/71ry3etBpJDa0aX73OL37xCzfMokmhp4N6nUaOHOl6qRQC1cOlQCI/+9nPbNiwYS40KCyqR0V1Ei8aOtJr6bUVBBVKNck2P3o/1bOkz4DmymgoUMuJNXwYPdcFZilB7sHJJJCdne3GEHVUkXuSUzxkzrjbkk36gPElXQSUQVq1sXPnTjc/Q0fPQFmjoTz1xpTUFgTJ9r1wMu03cQ0AAHiHgAIAALzDMmMAAIopCWdJJA16UAAAgHcIKAAAwDsEFACnjG5uAPH+PiCgACi28DTghw8fLumiAPBE+H2Qe5uAk8UkWQDFppNh6QRT4eZpOllXuI8KgLLXc3L48GH3faDvhehNLYuDgALglGgDPDmZnW8BlF41a9aMfC+c1oCycuVKe/zxx23Dhg1uA6rcZ9BTgtJeBNOnT7d9+/a57ba1nbn2QgjpVMjao2HRokXu1L7aIvvJJ5/Md/tpAH5Tj4lOAa/9Y/LbJRdA2VGhQoVT7jkpdkDRTpUXXnih3XLLLXbttdfmuX/cuHFuD4nZs2e709xqvwPtBbFly5bIKW+1qZTCjbbE1heaNkkaNGiQzZ07Ny5/FIDTT19K8fpiAoCTDijdu3d3
l/yo90RbXN9///2Rza20cZJ2sly4cKH16dPHtm7d6nbo1C6O2vZatOHW1VdfbePHj3cbKQEAgLItrqt4tDmQtiPv2rVr5DZtCtS+fXvLzMx01/VT41NhOBE9XkM9a9asyfd5c3Jy3AZD0RcAAFB6xTWgKJyIekyi6Xp4n35qrDr31uK1a9eOPCa3sWPHuqATXho1ahTPYgMAAM8kxXlQRo4c6bZmDi+7d+8u6SIBAIBkCSjhsqKsrKyY23U9vE8/cy9H/O6779zKnoKWJaWmplr16tVjLgAAoPSKa0DRqh2FjIyMjMhtmi+iuSXp6enuun5q+bGWKYeWLVtmx48fd3NVAAAATnoVz8GDB+3jjz+OmRj7/vvvuzkkjRs3tqFDh9rDDz/sznsSLjPWypzwXCktWrSwq666ygYOHGjTpk1zy4yHDBniVviwggcAABQroKxfv94uv/zyyPXhw4e7n/369bNZs2bZiBEj3LlSdF4T9ZR07tzZLSsOz4Eic+bMcaGkS5cukRO16dwpAAAAkhIk4TakGjbSah5NmE3EfJTMGXdbskkfML6kiwAAQNza76RYxQMAAMoWAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAJT+gPL999/bqFGjrGnTpla5cmU799xz7aGHHrIgCCKP0e+jR4+2s846yz2ma9eutmPHjngXBQAAJKm4B5THHnvMpk6dak8//bRt3brVXR83bpw99dRTkcfo+uTJk23atGm2Zs0aq1KlinXr1s2OHDkS7+IAAIAkVD7eT/juu+9az549rUePHu56kyZN7KWXXrK1a9dGek8mTZpk999/v3ucvPDCC1a/fn1buHCh9enTJ95FAgAAZb0HpWPHjpaRkWHbt2931zdt2mSrVq2y7t27u+s7d+60PXv2uGGdUI0aNax9+/aWmZmZ73Pm5ORYdnZ2zAUAAJRece9Buffee12AaN68uZ1xxhluTsqYMWOsb9++7n6FE1GPSTRdD+/LbezYsfbggw/Gu6gAAKCs9KC88sorNmfOHJs7d65t3LjRZs+ebePHj3c/i2vkyJG2f//+yGX37t1xLTMAACjlPSi///3vXS9KOJekdevW9umnn7pekH79+llaWpq7PSsry63iCen6RRddlO9zpqamugsAACgb4t6DcvjwYStXLvZpNdRz/Phx97uWHyukaJ5KSENCWs2Tnp4e7+IAAIAkFPcelGuuucbNOWncuLGdf/759t5779mECRPslltucfenpKTY0KFD7eGHH7ZmzZq5wKLzpjRo0MB69eoV7+IAAIAkFPeAovOdKHDcfvvttnfvXhc8br31VndittCIESPs0KFDNmjQINu3b5917tzZlixZYpUqVYp3cQAAQBJKCaJP8ZokNCSkpcmaMFu9evW4P3/mjLst2aQPGF/SRQAAIG7tN3vxAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADA
OwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAACgbASUzz77zG688UarU6eOVa5c2Vq3bm3r16+P3B8EgY0ePdrOOussd3/Xrl1tx44diSgKAABIQnEPKN9884116tTJKlSoYG+++aZt2bLFnnjiCatVq1bkMePGjbPJkyfbtGnTbM2aNValShXr1q2bHTlyJN7FAQAASah8vJ/wscces0aNGtnMmTMjtzVt2jSm92TSpEl2//33W8+ePd1tL7zwgtWvX98WLlxoffr0iXeRAABAWe9Bef311+2SSy6x6667zurVq2cXX3yxTZ8+PXL/zp07bc+ePW5YJ1SjRg1r3769ZWZm5vucOTk5lp2dHXMBAAClV9wDyieffGJTp061Zs2a2VtvvWW33Xab3XnnnTZ79mx3v8KJqMckmq6H9+U2duxYF2LCi3poAABA6RX3gHL8+HFr06aNPfLII673ZNCgQTZw4EA336S4Ro4cafv3749cdu/eHdcyAwCAUh5QtDKnZcuWMbe1aNHCdu3a5X5PS0tzP7OysmIeo+vhfbmlpqZa9erVYy4AAKD0intA0Qqebdu2xdy2fft2O/vssyMTZhVEMjIyIvdrTolW86Snp8e7OAAAIAnFfRXPsGHDrGPHjm6I5/rrr7e1a9fac8895y6SkpJiQ4cOtYcfftjNU1FgGTVqlDVo0MB69eoV7+IAAIAkFPeA0q5dO1uwYIGbN/LHP/7RBRAtK+7bt2/kMSNGjLBDhw65+Sn79u2zzp0725IlS6xSpUrxLg4AAEhCKYFOTJJkNCSk1TyaMJuI+SiZM+62ZJM+YHxJFwEAgLi13+zFAwAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAACAshdQHn30UUtJSbGhQ4dGbjty5IgNHjzY6tSpY1WrVrXevXtbVlZWoosCAACSREIDyrp16+zZZ5+1Cy64IOb2YcOG2aJFi2z+/Pm2YsUK+/zzz+3aa69NZFEAAEASSVhAOXjwoPXt29emT59utWrVity+f/9+mzFjhk2YMMGuuOIKa9u2rc2cOdPeffddW716daKKAwAAkkjCAoqGcHr06GFdu3aNuX3Dhg127NixmNubN29ujRs3tszMzEQVBwAAJJHyiXjSefPm2caNG90QT2579uyxihUrWs2aNWNur1+/vrsvPzk5Oe4Sys7OTkCpAQBAqe1B2b17t9111102Z84cq1SpUlyec+zYsVajRo3IpVGjRnF5XgAAUEYCioZw9u7da23atLHy5cu7iybCTp482f2unpKjR4/avn37Yv6dVvGkpaXl+5wjR450c1fCi0IQAAAoveI+xNOlSxf74IMPYm7r37+/m2dyzz33uN6PChUqWEZGhlteLNu2bbNdu3ZZenp6vs+ZmprqLgAAoGyIe0CpVq2atWrVKua2KlWquHOehLcPGDDAhg8fbrVr17bq1avbHXfc4cJJhw4d4l0cAACQhBIySfZEJk6c
aOXKlXM9KJr82q1bN3vmmWdKoigAAKCsBpS333475romz06ZMsVdAAAAcmMvHgAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAASn9AGTt2rLVr186qVatm9erVs169etm2bdtiHnPkyBEbPHiw1alTx6pWrWq9e/e2rKyseBcFAAAkqbgHlBUrVrjwsXr1alu6dKkdO3bMfvrTn9qhQ4cijxk2bJgtWrTI5s+f7x7/+eef27XXXhvvogAAgCRVPt5PuGTJkpjrs2bNcj0pGzZssB//+Me2f/9+mzFjhs2dO9euuOIK95iZM2daixYtXKjp0KFDvIsEAACSTMLnoCiQSO3atd1PBRX1qnTt2jXymObNm1vjxo0tMzMz3+fIycmx7OzsmAsAACi9EhpQjh8/bkOHDrVOnTpZq1at3G179uyxihUrWs2aNWMeW79+fXdfQfNaatSoEbk0atQokcUGAAClOaBoLsrmzZtt3rx5p/Q8I0eOdD0x4WX37t1xKyMAACgDc1BCQ4YMscWLF9vKlSutYcOGkdvT0tLs6NGjtm/fvpheFK3i0X35SU1NdRcAAFA2xL0HJQgCF04WLFhgy5Yts6ZNm8bc37ZtW6tQoYJlZGREbtMy5F27dll6enq8iwMAAJJQ+UQM62iFzmuvvebOhRLOK9HckcqVK7ufAwYMsOHDh7uJs9WrV7c77rjDhRNW8AAAgIQElKlTp7qfP/nJT2Ju11Lim2++2f0+ceJEK1eunDtBm1bodOvWzZ555hneEQAAkJiAoiGeE6lUqZJNmTLFXQAAAHJjLx4AAOAdAgoAAPAOAQUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8Q0ABAADeIaAAAADvEFAAAIB3CCgAAMA7BBQAAOAdAgoAAPAOAQUAAHinfEkXAPGROeNuSzbpA8aXdBEAAJ6iBwUAAHiHgAIAALxDQAEAAN4hoAAAAO8QUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAADgHQIKAADwDgEFAAB4h4ACAAC8U76kCwAkk8wZd1uySR8wvqSLAAAnjR4UAADgHQIKAADwDgEFAAB4h4ACAAC8U6IBZcqUKdakSROrVKmStW/f3tauXVuSxQEAAGV9Fc/LL79sw4cPt2nTprlwMmnSJOvWrZtt27bN6tWrV1LFAgDAyvrqPx9WAJZYD8qECRNs4MCB1r9/f2vZsqULKmeeeaY9//zzJVUkAABQlntQjh49ahs2bLCRI0dGbitXrpx17drVMjMz8zw+JyfHXUL79+93P7OzsxNSvkPf/v/XQuIk6v1LpGT8bCRjPQOlSTJ+byTquyN8ziAI/AwoX331lX3//fdWv379mNt1/aOPPsrz+LFjx9qDDz6Y5/ZGjRoltJxIsDueLukSlA3UMwDPvjsOHDhgNWrUSP4zyaqnRfNVQsePH7evv/7a6tSpYykpKaeU5BRydu/ebdWrV49TaZEf
6vr0oa5PH+r69KGuS0d9q+dE4aRBgwYnfGyJBJS6devaGWecYVlZWTG363paWlqex6emprpLtJo1a8atPKp8PvCnB3V9+lDXpw91ffpQ18lf3yfqOSnRSbIVK1a0tm3bWkZGRkyviK6np6eXRJEAAIBHSmyIR0M2/fr1s0suucQuvfRSt8z40KFDblUPAAAo20osoPzyl7+0L7/80kaPHm179uyxiy66yJYsWZJn4mwiadjogQceyDN8hPijrk8f6vr0oa5PH+q67NV3SlCUtT4AAACnEXvxAAAA7xBQAACAdwgoAADAOwQUAADgnTIdUKZMmWJNmjSxSpUquR2V165dW9JFSmrakqBdu3ZWrVo1tyN1r1693O7U0Y4cOWKDBw92ZwGuWrWq9e7dO88J+3DyHn30UXdW5aFDh0Zuo67j67PPPrMbb7zR1WflypWtdevWtn79+sj9Wm+gVYlnnXWWu197i+3YsaNEy5yMtA3KqFGjrGnTpq4ezz33XHvooYdi9m6hrotn5cqVds0117izuOr7YuHChTH3F6VedRb3vn37upO36YSpAwYMsIMHD1pCBGXUvHnzgooVKwbPP/988OGHHwYDBw4MatasGWRlZZV00ZJWt27dgpkzZwabN28O3n///eDqq68OGjduHBw8eDDymN/+9rdBo0aNgoyMjGD9+vVBhw4dgo4dO5ZouZPd2rVrgyZNmgQXXHBBcNddd0Vup67j5+uvvw7OPvvs4Oabbw7WrFkTfPLJJ8Fbb70VfPzxx5HHPProo0GNGjWChQsXBps2bQp+9rOfBU2bNg2+/fbbEi17shkzZkxQp06dYPHixcHOnTuD+fPnB1WrVg2efPLJyGOo6+J54403gvvuuy949dVXlfaCBQsWxNxflHq96qqrggsvvDBYvXp18M477wTnnXdecMMNNwSJUGYDyqWXXhoMHjw4cv37778PGjRoEIwdO7ZEy1Wa7N271/1PsGLFCnd93759QYUKFdwXTmjr1q3uMZmZmSVY0uR14MCBoFmzZsHSpUuDyy67LBJQqOv4uueee4LOnTsXeP/x48eDtLS04PHHH4/cpvcgNTU1eOmll05TKUuHHj16BLfcckvMbddee23Qt29f9zt1HR+5A0pR6nXLli3u361bty7ymDfffDNISUkJPvvssyDeyuQQz9GjR23Dhg2u+ypUrlw5dz0zM7NEy1aa7N+/3/2sXbu2+6k6P3bsWEy9N2/e3Bo3bky9F5OGcHr06BFTp0Jdx9frr7/uznp93XXXueHLiy++2KZPnx65f+fOne6Ek9H1rf1GNHRMfZ+cjh07um1Ptm/f7q5v2rTJVq1aZd27d3fXqevEKEq96qeGdfT/QkiPV/u5Zs2auJcpKXYzjrevvvrKjXPmPmutrn/00UclVq7SRHsraT5Ep06drFWrVu42ffi1D1PujR5V77oPJ2fevHm2ceNGW7duXZ77qOv4+uSTT2zq1Klui44//OEPrs7vvPNOV8fasiOs0/y+U6jvk3Pvvfe6nXQVqLWprL6rx4wZ4+Y9CHWdGEWpV/1UQI9Wvnx5dxCaiLovkwEFp+fIfvPmze7IB/GnLdDvuusuW7p0qZvkjcQHbh01PvLII+66elD0+Z42bZoLKIifV155xebMmWNz5861888/395//313sKOJndR12VImh3jq1q3rknnuFQ26npaWVmLlKi2GDBliixcvtuXLl1vDhg0jt6tuNby2b9++mMdT7ydPQzh79+61Nm3auCMYXVasWGGTJ092v+uoh7qOH61qaNmyZcxtLVq0sF27drnfwzrlO+XU/f73v3e9KH369HErpW666SYbNmyYWyUo1HViFKVe9VPfO9G+++47t7InEXVfJgOKumXbtm3rxjmjj5B0PT09vUTLlsw070rhZMGCBbZs2TK3TDCa6rxChQox9a5lyPqSp95PTpcuXeyDDz5wR5fhRUf46gYPf6eu40dDlbmXzGuOxNln
n+1+12ddX9DR9a1hCo3LU98n5/Dhw25OQzQdUOo7WqjrxChKveqnDnp0gBTSd73eG81VibugDC8z1uzkWbNmuZnJgwYNcsuM9+zZU9JFS1q33XabW6L29ttvB1988UXkcvjw4Zilr1p6vGzZMrf0NT093V1w6qJX8Qh1Hd+l3OXLl3dLYHfs2BHMmTMnOPPMM4MXX3wxZommvkNee+214B//+EfQs2dPlr4WQ79+/YIf/OAHkWXGWhJbt27dYMSIEZHHUNfFX/X33nvvuYua/wkTJrjfP/300yLXq5YZX3zxxW65/apVq9wqQpYZJ8BTTz3lvsB1PhQtO9a6bhSfPvD5XXRulJA+6LfffntQq1Yt9wX/85//3IUYxD+gUNfxtWjRoqBVq1buwKZ58+bBc889F3O/lmmOGjUqqF+/vntMly5dgm3btpVYeZNVdna2+xzru7lSpUrBOeec487dkZOTE3kMdV08y5cvz/c7WqGwqPX6r3/9ywUSnZumevXqQf/+/V3wSYQU/Sf+/TIAAADFVybnoAAAAL8RUAAAgHcIKAAAwDsEFAAA4B0CCgAA8A4BBQAAeIeAAgAAvENAAQAA3iGgAAAA7xBQAACAdwgoAADAOwQUAABgvvl/GCXMjUDplB8AAAAASUVORK5CYII=", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "\n", - "plt.hist(global_values[:len(global_values)//2], alpha=0.5, label='Neps HB with uniform sampler',bins=10)\n", - "plt.hist(global_values[len(global_values)//2:], alpha=0.5, label='HB with uniform sampler',bins=10)\n", - "# plt.hist([v+2 for n,v in enumerate(global_values) if n % 4 == 2], alpha=0.5, label='Neps HB with prior sampler',bins=10)\n", - "# plt.hist([v+3 for n,v in enumerate(global_values) if n % 4 == 3], alpha=0.5, label='HB with prior sampler',bins=10)\n", - "plt.legend()\n", - "plt.show()\n" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "70b97bfb", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Count of 1 in algo 0: 81\n", - "Count of 3 in algo 0: 32\n", - "Count of 11 in algo 0: 9\n", - "Count of 33 in algo 0: 3\n", - "Count of 100 in algo 0: 1\n", - "Count of 1 in algo 1: 81\n", - "Count of 3 in algo 1: 32\n", - "Count of 11 in algo 1: 9\n", - "Count of 33 in algo 1: 3\n", - "Count of 100 in algo 1: 1\n" - ] - } - ], - "source": [ - "n_algos = 2\n", - "for i in range(n_algos):\n", - " for j in [v for v in range(1000) if v in global_values]:\n", - " le = len(global_values)//2\n", - " print(f\"Count of {j:<3} in algo {i}: \", global_values[le*i:le*(i+1)].count(j))\n" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "938adc12", - "metadata": {}, - "outputs": [], - "source": [ - "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled, Integer, Fidelity\n", - "import neps\n", - "\n", - "# Define the NEPS space for the neural network architecture\n", - "class SimpleSpace(PipelineSpace):\n", - " i1 = Fidelity(Integer(1,100))\n", - " i2 = Integer(0,50)\n", - " i3 = Categorical(['a','b','c'])\n", - "global_values = []\n", - "def evaluate_pipeline(i1, i2, i3, *args, **kwargs):\n", - " # Dummy evaluation function\n", - " 
global_values.append(i1)\n", - " return {\"objective_to_minimize\": -i2/50 + i1,\n", - " \"cost\": i1}" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "bfd8f206", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0\n", - "0\n", - "Fidelities to spend: 1\n", - "Fidelities to spend: 2\n", - "0\n", - "0\n", - "Fidelities to spend: 3\n", - "0\n", - "0\n", - "Fidelities to spend: 4\n", - "0\n", - "0\n", - "Fidelities to spend: 5\n", - "0\n", - "0\n", - "Fidelities to spend: 6\n", - "0\n", - "0\n", - "Fidelities to spend: 7\n", - "0\n", - "0\n", - "Fidelities to spend: 8\n", - "0\n", - "0\n", - "Fidelities to spend: 9\n", - "0\n", - "0\n", - "Fidelities to spend: 10\n", - "0\n", - "0\n", - "Fidelities to spend: 11\n", - "0\n", - "0\n", - "Fidelities to spend: 12\n", - "0\n", - "0\n", - "Fidelities to spend: 13\n", - "0\n", - "0\n", - "Fidelities to spend: 14\n", - "0\n", - "0\n", - "Fidelities to spend: 15\n", - "0\n", - "0\n", - "Fidelities to spend: 16\n", - "0\n", - "0\n", - "Fidelities to spend: 17\n", - "0\n", - "0\n", - "Fidelities to spend: 18\n", - "0\n", - "0\n", - "Fidelities to spend: 19\n", - "0\n", - "0\n", - "Fidelities to spend: 20\n", - "0\n", - "0\n", - "Fidelities to spend: 21\n", - "0\n", - "0\n", - "Fidelities to spend: 22\n", - "0\n", - "0\n", - "Fidelities to spend: 23\n", - "0\n", - "0\n", - "Fidelities to spend: 24\n", - "0\n", - "0\n", - "Fidelities to spend: 25\n", - "0\n", - "0\n", - "Fidelities to spend: 26\n", - "0\n", - "0\n", - "Fidelities to spend: 27\n", - "0\n", - "0\n", - "Fidelities to spend: 28\n", - "0\n", - "0\n", - "Fidelities to spend: 29\n", - "0\n", - "0\n", - "Fidelities to spend: 30\n", - "0\n", - "0\n", - "Fidelities to spend: 31\n", - "0\n", - "0\n", - "Fidelities to spend: 32\n", - "0\n", - "0\n", - "Fidelities to spend: 33\n", - "0\n", - "0\n", - "Fidelities to spend: 34\n", - "0\n", - "0\n", - "Fidelities to spend: 35\n", - "0\n", - "0\n", - 
"Fidelities to spend: 36\n", - "0\n", - "0\n", - "Fidelities to spend: 37\n", - "0\n", - "0\n", - "Fidelities to spend: 38\n", - "0\n", - "0\n", - "Fidelities to spend: 39\n", - "0\n", - "0\n", - "Fidelities to spend: 40\n", - "0\n", - "0\n", - "Fidelities to spend: 41\n", - "0\n", - "0\n", - "Fidelities to spend: 42\n", - "0\n", - "0\n", - "Fidelities to spend: 43\n", - "0\n", - "0\n", - "Fidelities to spend: 44\n", - "0\n", - "0\n", - "Fidelities to spend: 45\n", - "0\n", - "0\n", - "Fidelities to spend: 46\n", - "0\n", - "0\n", - "Fidelities to spend: 47\n", - "0\n", - "0\n", - "Fidelities to spend: 48\n", - "0\n", - "0\n", - "Fidelities to spend: 49\n", - "0\n", - "0\n", - "Fidelities to spend: 50\n", - "0\n", - "0\n", - "Fidelities to spend: 51\n", - "0\n", - "0\n", - "Fidelities to spend: 52\n", - "0\n", - "0\n", - "Fidelities to spend: 53\n", - "0\n", - "0\n", - "Fidelities to spend: 54\n", - "0\n", - "0\n", - "Fidelities to spend: 55\n", - "0\n", - "0\n", - "Fidelities to spend: 56\n", - "0\n", - "0\n", - "Fidelities to spend: 57\n", - "0\n", - "0\n", - "Fidelities to spend: 58\n", - "0\n", - "0\n", - "Fidelities to spend: 59\n", - "0\n", - "0\n", - "Fidelities to spend: 60\n", - "0\n", - "0\n", - "Fidelities to spend: 61\n", - "0\n", - "0\n", - "Fidelities to spend: 62\n", - "0\n", - "0\n", - "Fidelities to spend: 63\n", - "0\n", - "0\n", - "Fidelities to spend: 64\n", - "0\n", - "0\n", - "Fidelities to spend: 65\n", - "0\n", - "0\n", - "Fidelities to spend: 66\n", - "0\n", - "0\n", - "Fidelities to spend: 67\n", - "0\n", - "0\n", - "Fidelities to spend: 68\n", - "0\n", - "0\n", - "Fidelities to spend: 69\n", - "0\n", - "0\n", - "Fidelities to spend: 70\n", - "0\n", - "0\n", - "Fidelities to spend: 71\n", - "0\n", - "0\n", - "Fidelities to spend: 72\n", - "0\n", - "0\n", - "Fidelities to spend: 73\n", - "0\n", - "0\n", - "Fidelities to spend: 74\n", - "0\n", - "0\n", - "Fidelities to spend: 75\n", - "0\n", - "0\n", - "Fidelities to spend: 76\n", - "0\n", - 
"0\n", - "Fidelities to spend: 77\n", - "0\n", - "0\n", - "Fidelities to spend: 78\n", - "0\n", - "0\n", - "Fidelities to spend: 79\n", - "0\n", - "0\n", - "Fidelities to spend: 80\n", - "0\n", - "0\n", - "Fidelities to spend: 81\n", - "0\n", - "0\n", - "Fidelities to spend: 82\n", - "1\n", - "1\n", - "Fidelities to spend: 83\n", - "Fidelities to spend: 84\n", - "Fidelities to spend: 85\n", - "1\n", - "1\n", - "Fidelities to spend: 86\n", - "Fidelities to spend: 87\n", - "Fidelities to spend: 88\n", - "1\n", - "1\n", - "Fidelities to spend: 89\n", - "Fidelities to spend: 90\n", - "Fidelities to spend: 91\n", - "1\n", - "1\n", - "Fidelities to spend: 92\n", - "Fidelities to spend: 93\n", - "Fidelities to spend: 94\n", - "1\n", - "1\n", - "Fidelities to spend: 95\n", - "Fidelities to spend: 96\n", - "Fidelities to spend: 97\n", - "1\n", - "1\n", - "Fidelities to spend: 98\n", - "Fidelities to spend: 99\n", - "Fidelities to spend: 100\n", - "1\n", - "1\n", - "Fidelities to spend: 101\n", - "Fidelities to spend: 102\n", - "Fidelities to spend: 103\n", - "1\n", - "1\n", - "Fidelities to spend: 104\n", - "Fidelities to spend: 105\n", - "Fidelities to spend: 106\n", - "1\n", - "1\n", - "Fidelities to spend: 107\n", - "Fidelities to spend: 108\n", - "Fidelities to spend: 109\n", - "1\n", - "1\n", - "Fidelities to spend: 110\n", - "Fidelities to spend: 111\n", - "Fidelities to spend: 112\n", - "1\n", - "1\n", - "Fidelities to spend: 113\n", - "Fidelities to spend: 114\n", - "Fidelities to spend: 115\n", - "1\n", - "1\n", - "Fidelities to spend: 116\n", - "Fidelities to spend: 117\n", - "Fidelities to spend: 118\n", - "1\n", - "1\n", - "Fidelities to spend: 119\n", - "Fidelities to spend: 120\n", - "Fidelities to spend: 121\n", - "1\n", - "1\n", - "Fidelities to spend: 122\n", - "Fidelities to spend: 123\n", - "Fidelities to spend: 124\n", - "1\n", - "1\n", - "Fidelities to spend: 125\n", - "Fidelities to spend: 126\n", - "Fidelities to spend: 127\n", - "1\n", - "1\n", - 
"Fidelities to spend: 128\n", - "Fidelities to spend: 129\n", - "Fidelities to spend: 130\n", - "1\n", - "1\n", - "Fidelities to spend: 131\n", - "Fidelities to spend: 132\n", - "Fidelities to spend: 133\n", - "1\n", - "1\n", - "Fidelities to spend: 134\n", - "Fidelities to spend: 135\n", - "Fidelities to spend: 136\n", - "1\n", - "1\n", - "Fidelities to spend: 137\n", - "Fidelities to spend: 138\n", - "Fidelities to spend: 139\n", - "1\n", - "1\n", - "Fidelities to spend: 140\n", - "Fidelities to spend: 141\n", - "Fidelities to spend: 142\n", - "1\n", - "1\n", - "Fidelities to spend: 143\n", - "Fidelities to spend: 144\n", - "Fidelities to spend: 145\n", - "1\n", - "1\n", - "Fidelities to spend: 146\n", - "Fidelities to spend: 147\n", - "Fidelities to spend: 148\n", - "1\n", - "1\n", - "Fidelities to spend: 149\n", - "Fidelities to spend: 150\n", - "Fidelities to spend: 151\n", - "1\n", - "1\n", - "Fidelities to spend: 152\n", - "Fidelities to spend: 153\n", - "Fidelities to spend: 154\n", - "1\n", - "1\n", - "Fidelities to spend: 155\n", - "Fidelities to spend: 156\n", - "Fidelities to spend: 157\n", - "1\n", - "1\n", - "Fidelities to spend: 158\n", - "Fidelities to spend: 159\n", - "Fidelities to spend: 160\n", - "1\n", - "1\n", - "Fidelities to spend: 161\n", - "Fidelities to spend: 162\n", - "Fidelities to spend: 163\n", - "2\n", - "2\n", - "Fidelities to spend: 164\n", - "Fidelities to spend: 165\n", - "Fidelities to spend: 166\n", - "Fidelities to spend: 167\n", - "Fidelities to spend: 168\n", - "Fidelities to spend: 169\n", - "Fidelities to spend: 170\n", - "Fidelities to spend: 171\n", - "Fidelities to spend: 172\n", - "Fidelities to spend: 173\n", - "Fidelities to spend: 174\n", - "2\n", - "2\n", - "Fidelities to spend: 175\n", - "Fidelities to spend: 176\n", - "Fidelities to spend: 177\n", - "Fidelities to spend: 178\n", - "Fidelities to spend: 179\n", - "Fidelities to spend: 180\n", - "Fidelities to spend: 181\n", - "Fidelities to spend: 182\n", - 
"Fidelities to spend: 183\n", - "Fidelities to spend: 184\n", - "Fidelities to spend: 185\n", - "2\n", - "2\n", - "Fidelities to spend: 186\n", - "Fidelities to spend: 187\n", - "Fidelities to spend: 188\n", - "Fidelities to spend: 189\n", - "Fidelities to spend: 190\n", - "Fidelities to spend: 191\n", - "Fidelities to spend: 192\n", - "Fidelities to spend: 193\n", - "Fidelities to spend: 194\n", - "Fidelities to spend: 195\n", - "Fidelities to spend: 196\n", - "2\n", - "2\n", - "Fidelities to spend: 197\n", - "Fidelities to spend: 198\n", - "Fidelities to spend: 199\n", - "Fidelities to spend: 200\n", - "Fidelities to spend: 201\n", - "Fidelities to spend: 202\n", - "Fidelities to spend: 203\n", - "Fidelities to spend: 204\n", - "Fidelities to spend: 205\n", - "Fidelities to spend: 206\n", - "Fidelities to spend: 207\n", - "2\n", - "2\n", - "Fidelities to spend: 208\n", - "Fidelities to spend: 209\n", - "Fidelities to spend: 210\n", - "Fidelities to spend: 211\n", - "Fidelities to spend: 212\n", - "Fidelities to spend: 213\n", - "Fidelities to spend: 214\n", - "Fidelities to spend: 215\n", - "Fidelities to spend: 216\n", - "Fidelities to spend: 217\n", - "Fidelities to spend: 218\n", - "2\n", - "2\n", - "Fidelities to spend: 219\n", - "Fidelities to spend: 220\n", - "Fidelities to spend: 221\n", - "Fidelities to spend: 222\n", - "Fidelities to spend: 223\n", - "Fidelities to spend: 224\n", - "Fidelities to spend: 225\n", - "Fidelities to spend: 226\n", - "Fidelities to spend: 227\n", - "Fidelities to spend: 228\n", - "Fidelities to spend: 229\n", - "2\n", - "2\n", - "Fidelities to spend: 230\n", - "Fidelities to spend: 231\n", - "Fidelities to spend: 232\n", - "Fidelities to spend: 233\n", - "Fidelities to spend: 234\n", - "Fidelities to spend: 235\n", - "Fidelities to spend: 236\n", - "Fidelities to spend: 237\n", - "Fidelities to spend: 238\n", - "Fidelities to spend: 239\n", - "Fidelities to spend: 240\n", - "2\n", - "2\n", - "Fidelities to spend: 241\n", - 
"Fidelities to spend: 242\n", - "Fidelities to spend: 243\n", - "Fidelities to spend: 244\n", - "Fidelities to spend: 245\n", - "Fidelities to spend: 246\n", - "Fidelities to spend: 247\n", - "Fidelities to spend: 248\n", - "Fidelities to spend: 249\n", - "Fidelities to spend: 250\n", - "Fidelities to spend: 251\n", - "2\n", - "2\n", - "Fidelities to spend: 252\n", - "Fidelities to spend: 253\n", - "Fidelities to spend: 254\n", - "Fidelities to spend: 255\n", - "Fidelities to spend: 256\n", - "Fidelities to spend: 257\n", - "Fidelities to spend: 258\n", - "Fidelities to spend: 259\n", - "Fidelities to spend: 260\n", - "Fidelities to spend: 261\n", - "Fidelities to spend: 262\n", - "3\n", - "3\n", - "Fidelities to spend: 263\n", - "Fidelities to spend: 264\n", - "Fidelities to spend: 265\n", - "Fidelities to spend: 266\n", - "Fidelities to spend: 267\n", - "Fidelities to spend: 268\n", - "Fidelities to spend: 269\n", - "Fidelities to spend: 270\n", - "Fidelities to spend: 271\n", - "Fidelities to spend: 272\n", - "Fidelities to spend: 273\n", - "Fidelities to spend: 274\n", - "Fidelities to spend: 275\n", - "Fidelities to spend: 276\n", - "Fidelities to spend: 277\n", - "Fidelities to spend: 278\n", - "Fidelities to spend: 279\n", - "Fidelities to spend: 280\n", - "Fidelities to spend: 281\n", - "Fidelities to spend: 282\n", - "Fidelities to spend: 283\n", - "Fidelities to spend: 284\n", - "Fidelities to spend: 285\n", - "Fidelities to spend: 286\n", - "Fidelities to spend: 287\n", - "Fidelities to spend: 288\n", - "Fidelities to spend: 289\n", - "Fidelities to spend: 290\n", - "Fidelities to spend: 291\n", - "Fidelities to spend: 292\n", - "Fidelities to spend: 293\n", - "Fidelities to spend: 294\n", - "Fidelities to spend: 295\n", - "3\n", - "3\n", - "Fidelities to spend: 296\n", - "Fidelities to spend: 297\n", - "Fidelities to spend: 298\n", - "Fidelities to spend: 299\n", - "Fidelities to spend: 300\n", - "Fidelities to spend: 301\n", - "Fidelities to spend: 
302\n", - "Fidelities to spend: 303\n", - "Fidelities to spend: 304\n", - "Fidelities to spend: 305\n", - "Fidelities to spend: 306\n", - "Fidelities to spend: 307\n", - "Fidelities to spend: 308\n", - "Fidelities to spend: 309\n", - "Fidelities to spend: 310\n", - "Fidelities to spend: 311\n", - "Fidelities to spend: 312\n", - "Fidelities to spend: 313\n", - "Fidelities to spend: 314\n", - "Fidelities to spend: 315\n", - "Fidelities to spend: 316\n", - "Fidelities to spend: 317\n", - "Fidelities to spend: 318\n", - "Fidelities to spend: 319\n", - "Fidelities to spend: 320\n", - "Fidelities to spend: 321\n", - "Fidelities to spend: 322\n", - "Fidelities to spend: 323\n", - "Fidelities to spend: 324\n", - "Fidelities to spend: 325\n", - "Fidelities to spend: 326\n", - "Fidelities to spend: 327\n", - "Fidelities to spend: 328\n", - "3\n", - "3\n", - "Fidelities to spend: 329\n", - "Fidelities to spend: 330\n", - "Fidelities to spend: 331\n", - "Fidelities to spend: 332\n", - "Fidelities to spend: 333\n", - "Fidelities to spend: 334\n", - "Fidelities to spend: 335\n", - "Fidelities to spend: 336\n", - "Fidelities to spend: 337\n", - "Fidelities to spend: 338\n", - "Fidelities to spend: 339\n", - "Fidelities to spend: 340\n", - "Fidelities to spend: 341\n", - "Fidelities to spend: 342\n", - "Fidelities to spend: 343\n", - "Fidelities to spend: 344\n", - "Fidelities to spend: 345\n", - "Fidelities to spend: 346\n", - "Fidelities to spend: 347\n", - "Fidelities to spend: 348\n", - "Fidelities to spend: 349\n", - "Fidelities to spend: 350\n", - "Fidelities to spend: 351\n", - "Fidelities to spend: 352\n", - "Fidelities to spend: 353\n", - "Fidelities to spend: 354\n", - "Fidelities to spend: 355\n", - "Fidelities to spend: 356\n", - "Fidelities to spend: 357\n", - "Fidelities to spend: 358\n", - "Fidelities to spend: 359\n", - "Fidelities to spend: 360\n", - "Fidelities to spend: 361\n", - "4\n", - "4\n", - "Fidelities to spend: 362\n", - "Fidelities to spend: 363\n", - 
"Fidelities to spend: 364\n", - "Fidelities to spend: 365\n", - "Fidelities to spend: 366\n", - "Fidelities to spend: 367\n", - "Fidelities to spend: 368\n", - "Fidelities to spend: 369\n", - "Fidelities to spend: 370\n", - "Fidelities to spend: 371\n", - "Fidelities to spend: 372\n", - "Fidelities to spend: 373\n", - "Fidelities to spend: 374\n", - "Fidelities to spend: 375\n", - "Fidelities to spend: 376\n", - "Fidelities to spend: 377\n", - "Fidelities to spend: 378\n", - "Fidelities to spend: 379\n", - "Fidelities to spend: 380\n", - "Fidelities to spend: 381\n", - "Fidelities to spend: 382\n", - "Fidelities to spend: 383\n", - "Fidelities to spend: 384\n", - "Fidelities to spend: 385\n", - "Fidelities to spend: 386\n", - "Fidelities to spend: 387\n", - "Fidelities to spend: 388\n", - "Fidelities to spend: 389\n", - "Fidelities to spend: 390\n", - "Fidelities to spend: 391\n", - "Fidelities to spend: 392\n", - "Fidelities to spend: 393\n", - "Fidelities to spend: 394\n", - "Fidelities to spend: 395\n", - "Fidelities to spend: 396\n", - "Fidelities to spend: 397\n", - "Fidelities to spend: 398\n", - "Fidelities to spend: 399\n", - "Fidelities to spend: 400\n", - "Fidelities to spend: 401\n", - "Fidelities to spend: 402\n", - "Fidelities to spend: 403\n", - "Fidelities to spend: 404\n", - "Fidelities to spend: 405\n", - "Fidelities to spend: 406\n", - "Fidelities to spend: 407\n", - "Fidelities to spend: 408\n", - "Fidelities to spend: 409\n", - "Fidelities to spend: 410\n", - "Fidelities to spend: 411\n", - "Fidelities to spend: 412\n", - "Fidelities to spend: 413\n", - "Fidelities to spend: 414\n", - "Fidelities to spend: 415\n", - "Fidelities to spend: 416\n", - "Fidelities to spend: 417\n", - "Fidelities to spend: 418\n", - "Fidelities to spend: 419\n", - "Fidelities to spend: 420\n", - "Fidelities to spend: 421\n", - "Fidelities to spend: 422\n", - "Fidelities to spend: 423\n", - "Fidelities to spend: 424\n", - "Fidelities to spend: 425\n", - "Fidelities to 
spend: 426\n", - "Fidelities to spend: 427\n", - "Fidelities to spend: 428\n", - "Fidelities to spend: 429\n", - "Fidelities to spend: 430\n", - "Fidelities to spend: 431\n", - "Fidelities to spend: 432\n", - "Fidelities to spend: 433\n", - "Fidelities to spend: 434\n", - "Fidelities to spend: 435\n", - "Fidelities to spend: 436\n", - "Fidelities to spend: 437\n", - "Fidelities to spend: 438\n", - "Fidelities to spend: 439\n", - "Fidelities to spend: 440\n", - "Fidelities to spend: 441\n", - "Fidelities to spend: 442\n", - "Fidelities to spend: 443\n", - "Fidelities to spend: 444\n", - "Fidelities to spend: 445\n", - "Fidelities to spend: 446\n", - "Fidelities to spend: 447\n", - "Fidelities to spend: 448\n", - "Fidelities to spend: 449\n", - "Fidelities to spend: 450\n", - "Fidelities to spend: 451\n", - "Fidelities to spend: 452\n", - "Fidelities to spend: 453\n", - "Fidelities to spend: 454\n", - "Fidelities to spend: 455\n", - "Fidelities to spend: 456\n", - "Fidelities to spend: 457\n", - "Fidelities to spend: 458\n", - "Fidelities to spend: 459\n", - "Fidelities to spend: 460\n", - "Fidelities to spend: 461\n", - "1\n", - "1\n", - "Fidelities to spend: 462\n", - "Fidelities to spend: 463\n", - "Fidelities to spend: 464\n", - "1\n", - "1\n", - "Fidelities to spend: 465\n", - "Fidelities to spend: 466\n", - "Fidelities to spend: 467\n", - "1\n", - "1\n", - "Fidelities to spend: 468\n", - "Fidelities to spend: 469\n", - "Fidelities to spend: 470\n", - "1\n", - "1\n", - "Fidelities to spend: 471\n", - "Fidelities to spend: 472\n", - "Fidelities to spend: 473\n", - "1\n", - "1\n", - "Fidelities to spend: 474\n", - "Fidelities to spend: 475\n", - "Fidelities to spend: 476\n", - "1\n", - "1\n", - "Fidelities to spend: 477\n", - "Fidelities to spend: 478\n", - "Fidelities to spend: 479\n", - "1\n", - "1\n", - "Fidelities to spend: 480\n", - "Fidelities to spend: 481\n", - "Fidelities to spend: 482\n", - "1\n", - "1\n", - "Fidelities to spend: 483\n", - "Fidelities to 
spend: 484\n", - "Fidelities to spend: 485\n", - "1\n", - "1\n", - "Fidelities to spend: 486\n", - "Fidelities to spend: 487\n", - "Fidelities to spend: 488\n", - "1\n", - "1\n", - "Fidelities to spend: 489\n", - "Fidelities to spend: 490\n", - "Fidelities to spend: 491\n", - "1\n", - "1\n", - "Fidelities to spend: 492\n", - "Fidelities to spend: 493\n", - "Fidelities to spend: 494\n", - "1\n", - "1\n", - "Fidelities to spend: 495\n", - "Fidelities to spend: 496\n", - "Fidelities to spend: 497\n", - "1\n", - "1\n", - "Fidelities to spend: 498\n", - "Fidelities to spend: 499\n", - "Fidelities to spend: 500\n", - "1\n", - "1\n", - "Fidelities to spend: 501\n", - "Fidelities to spend: 502\n", - "Fidelities to spend: 503\n", - "1\n", - "1\n", - "Fidelities to spend: 504\n", - "Fidelities to spend: 505\n", - "Fidelities to spend: 506\n", - "1\n", - "1\n", - "Fidelities to spend: 507\n", - "Fidelities to spend: 508\n", - "Fidelities to spend: 509\n", - "1\n", - "1\n", - "Fidelities to spend: 510\n", - "Fidelities to spend: 511\n", - "Fidelities to spend: 512\n", - "1\n", - "1\n", - "Fidelities to spend: 513\n", - "Fidelities to spend: 514\n", - "Fidelities to spend: 515\n", - "1\n", - "1\n", - "Fidelities to spend: 516\n", - "Fidelities to spend: 517\n", - "Fidelities to spend: 518\n", - "1\n", - "1\n", - "Fidelities to spend: 519\n", - "Fidelities to spend: 520\n", - "Fidelities to spend: 521\n", - "1\n", - "1\n", - "Fidelities to spend: 522\n", - "Fidelities to spend: 523\n", - "Fidelities to spend: 524\n", - "1\n", - "1\n", - "Fidelities to spend: 525\n", - "Fidelities to spend: 526\n", - "Fidelities to spend: 527\n", - "1\n", - "1\n", - "Fidelities to spend: 528\n", - "Fidelities to spend: 529\n", - "Fidelities to spend: 530\n", - "1\n", - "1\n", - "Fidelities to spend: 531\n", - "Fidelities to spend: 532\n", - "Fidelities to spend: 533\n", - "1\n", - "1\n", - "Fidelities to spend: 534\n", - "Fidelities to spend: 535\n", - "Fidelities to spend: 536\n", - "1\n", - 
"1\n", - "Fidelities to spend: 537\n", - "Fidelities to spend: 538\n", - "Fidelities to spend: 539\n", - "1\n", - "1\n", - "Fidelities to spend: 540\n", - "Fidelities to spend: 541\n", - "Fidelities to spend: 542\n", - "2\n", - "2\n", - "Fidelities to spend: 543\n", - "Fidelities to spend: 544\n", - "Fidelities to spend: 545\n", - "Fidelities to spend: 546\n", - "Fidelities to spend: 547\n", - "Fidelities to spend: 548\n", - "Fidelities to spend: 549\n", - "Fidelities to spend: 550\n", - "Fidelities to spend: 551\n", - "Fidelities to spend: 552\n", - "Fidelities to spend: 553\n", - "2\n", - "2\n", - "Fidelities to spend: 554\n", - "Fidelities to spend: 555\n", - "Fidelities to spend: 556\n", - "Fidelities to spend: 557\n", - "Fidelities to spend: 558\n", - "Fidelities to spend: 559\n", - "Fidelities to spend: 560\n", - "Fidelities to spend: 561\n", - "Fidelities to spend: 562\n", - "Fidelities to spend: 563\n", - "Fidelities to spend: 564\n", - "2\n", - "2\n", - "Fidelities to spend: 565\n", - "Fidelities to spend: 566\n", - "Fidelities to spend: 567\n", - "Fidelities to spend: 568\n", - "Fidelities to spend: 569\n", - "Fidelities to spend: 570\n", - "Fidelities to spend: 571\n", - "Fidelities to spend: 572\n", - "Fidelities to spend: 573\n", - "Fidelities to spend: 574\n", - "Fidelities to spend: 575\n", - "2\n", - "2\n", - "Fidelities to spend: 576\n", - "Fidelities to spend: 577\n", - "Fidelities to spend: 578\n", - "Fidelities to spend: 579\n", - "Fidelities to spend: 580\n", - "Fidelities to spend: 581\n", - "Fidelities to spend: 582\n", - "Fidelities to spend: 583\n", - "Fidelities to spend: 584\n", - "Fidelities to spend: 585\n", - "Fidelities to spend: 586\n", - "2\n", - "2\n", - "Fidelities to spend: 587\n", - "Fidelities to spend: 588\n", - "Fidelities to spend: 589\n", - "Fidelities to spend: 590\n", - "Fidelities to spend: 591\n", - "Fidelities to spend: 592\n", - "Fidelities to spend: 593\n", - "Fidelities to spend: 594\n", - "Fidelities to spend: 
595\n", - "Fidelities to spend: 596\n", - "Fidelities to spend: 597\n", - "2\n", - "2\n", - "Fidelities to spend: 598\n", - "Fidelities to spend: 599\n" - ] - } - ], - "source": [ - "import neps\n", - "from neps import algorithms\n", - "from functools import partial\n", - "import matplotlib.pyplot as plt\n", - "\n", - "global_values = []\n", - "eta=3\n", - "\n", - "neps.run(\n", - " evaluate_pipeline,\n", - " SimpleSpace(),\n", - " root_directory=\"neps_test_runs/algo_tests4\",\n", - " overwrite_root_directory=True,\n", - " optimizer=partial(algorithms.neps_hyperband, eta=eta),\n", - " fidelities_to_spend=1\n", - ")\n", - "\n", - "neps.run(\n", - " evaluate_pipeline,\n", - " SimpleSpace(),\n", - " root_directory=\"neps_test_runs/algo_tests5\",\n", - " overwrite_root_directory=True,\n", - " optimizer=partial(algorithms.hyperband, eta=eta),\n", - " fidelities_to_spend=1\n", - ")\n", - "\n", - "\n", - "for f in range(1,600):\n", - " print(f\"Fidelities to spend: {f}\")\n", - " # partial(algorithms.neps_hyperband, sampler=\"prior\", eta=eta)]: \n", - " # partial(algorithms.hyperband, sampler=\"prior\", eta=eta)]:\n", - " neps.run(\n", - " evaluate_pipeline,\n", - " SimpleSpace(),\n", - " root_directory=\"neps_test_runs/algo_tests4\",\n", - " overwrite_root_directory=False,\n", - " optimizer=partial(algorithms.neps_hyperband, eta=eta),\n", - " fidelities_to_spend=f\n", - " )\n", - "\n", - " neps.run(\n", - " evaluate_pipeline,\n", - " SimpleSpace(),\n", - " root_directory=\"neps_test_runs/algo_tests5\",\n", - " overwrite_root_directory=False,\n", - " optimizer=partial(algorithms.hyperband, eta=eta),\n", - " fidelities_to_spend=f\n", - " )\n" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "eb12134d", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Count of 1 in algo 0: 81\n", - "Count of 3 in algo 0: 54\n", - "Count of 11 in algo 0: 15\n", - "Count of 33 in algo 0: 3\n", - "Count of 100 in algo 0: 
1\n", - "Count of 1 in algo 1: 81\n", - "Count of 3 in algo 1: 54\n", - "Count of 11 in algo 1: 15\n", - "Count of 33 in algo 1: 3\n", - "Count of 100 in algo 1: 1\n" - ] - } - ], - "source": [ - "n_algos = 2\n", - "for i in range(n_algos):\n", - " for j in [v for v in range(1000) if v in global_values]:\n", - " print(f\"Count of {j:<3} in algo {i}: \", [v for n,v in enumerate(global_values) if n % n_algos == i].count(j))\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "neural-pipeline-search (3.13.1)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.1" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/neps_examples/test_files/priors_test.ipynb b/neps_examples/test_files/priors_test.ipynb deleted file mode 100644 index 3145ecd3f..000000000 --- a/neps_examples/test_files/priors_test.ipynb +++ /dev/null @@ -1,400 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "180fcb7f", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\Users\\Amega\\Git\\neps\\.venv\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Original pipeline:\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param1 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.LOW)\n", - "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\n", - "==================================================\n", - "After removing 'int_param1' (in-place):\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\n", - "==================================================\n", - "After adding 'int_param1' (in-place):\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tint_param1 = Float(0.0, 1.0)\n", - "\n", - "==================================================\n", - "After removing 'int_param1' (in-place):\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param2 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tint_param3 = Integer(1, 100, prior=50, prior_confidence=ConfidenceLevel.HIGH)\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import torch\n", - "import torch.nn as nn\n", - "import neps\n", - "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", - "\n", - "# Define the NEPS space for the neural network architecture\n", - "class SimpleSpace(PipelineSpace):\n", - " int_param1 = neps.Integer(1,100, prior=50, 
prior_confidence=\"low\")\n", - " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\")\n", - " int_param3 = neps.Integer(1,100, prior=50, prior_confidence=\"high\")\n", - "\n", - "class OtherSpace(PipelineSpace):\n", - " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\", log=False)\n", - "\n", - "# Test in-place operations\n", - "pipeline = SimpleSpace()\n", - "print(\"Original pipeline:\")\n", - "print(pipeline)\n", - "\n", - "print(\"\\n\" + \"=\"*50)\n", - "print(\"After removing 'int_param1' (in-place):\")\n", - "pipeline = pipeline.remove(\"int_param1\")\n", - "print(pipeline)\n", - "\n", - "print(\"\\n\" + \"=\"*50)\n", - "print(\"After adding 'int_param1' (in-place):\")\n", - "pipeline = pipeline.add(neps.Float(0.0, 1.0), \"int_param1\")\n", - "print(pipeline)\n", - "\n", - "print(\"\\n\" + \"=\"*50)\n", - "print(\"After removing 'int_param1' (in-place):\")\n", - "pipeline = pipeline.remove(\"int_param1\")\n", - "print(pipeline)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "84c7766b", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - " Converted categorical_param\n", - "{'choices': ('a', 'b', 'c'),\n", - " 'prior': 0,\n", - " 'prior_confidence': }\n", - "\n", - " Pipeline categorical_param\n", - "{'choices': ('a', 'b', 'c'),\n", - " 'prior': 0,\n", - " 'prior_confidence': }\n", - "\n", - " Converted fidelity_param\n", - "{'min_value': 1, 'max_value': 10, 'log': False, 'prior': , 'prior_confidence': }\n", - "\n", - " Pipeline fidelity_param\n", - "{'min_value': 1, 'max_value': 10, 'log': False, 'prior': , 'prior_confidence': }\n", - "\n", - " Converted float_param\n", - "{'log': False,\n", - " 'max_value': 1.0,\n", - " 'min_value': 0.0,\n", - " 'prior': 0.5,\n", - " 'prior_confidence': }\n", - "\n", - " Pipeline float_param\n", - "{'log': False,\n", - " 'max_value': 1.0,\n", - " 'min_value': 0,\n", - " 'prior': 0.5,\n", - " 
'prior_confidence': }\n", - "\n", - " Converted int_param2\n", - "{'log': True,\n", - " 'max_value': 100,\n", - " 'min_value': 1,\n", - " 'prior': 50,\n", - " 'prior_confidence': }\n", - "\n", - " Pipeline int_param2\n", - "{'log': True,\n", - " 'max_value': 100,\n", - " 'min_value': 1,\n", - " 'prior': 50,\n", - " 'prior_confidence': }\n", - "PipelineSpace SimpleSpace with parameters:\n", - "\tint_param2 = Integer(1, 100, log, prior=50, prior_confidence=ConfidenceLevel.MEDIUM)\n", - "\tcategorical_param = Categorical(choices=('a', 'b', 'c'), prior=0, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tfloat_param = Float(0, 1.0, prior=0.5, prior_confidence=ConfidenceLevel.HIGH)\n", - "\tfidelity_param = Fidelity(Integer(1, 10))\n" - ] - } - ], - "source": [ - "import neps\n", - "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", - "from neps.space.neps_spaces import sampling\n", - "from neps.space.neps_spaces import neps_space\n", - "from functools import partial\n", - "from pprint import pprint\n", - "\n", - "# Define the NEPS space for the neural network architecture\n", - "class SimpleSpace(PipelineSpace):\n", - " int_param2 = neps.Integer(1,100, prior=50, log=True, prior_confidence=\"medium\")\n", - " categorical_param = Categorical((\"a\", \"b\", \"c\"), prior=0, prior_confidence=\"high\")\n", - " float_param = neps.Float(0, 1.0, prior=0.5, prior_confidence=\"high\")\n", - " fidelity_param = neps.Fidelity(neps.Integer(1, 10))\n", - "\n", - "old_space = neps.SearchSpace({\n", - " \"int_param2\": neps.HPOInteger(1,100, log=True, prior=50, prior_confidence=\"medium\"),\n", - " \"categorical_param\": neps.HPOCategorical([\"a\", \"b\", \"c\"], prior=\"a\", prior_confidence=\"high\"),\n", - " \"float_param\": neps.HPOFloat(0, 1.0, prior=0.5, prior_confidence=\"high\"),\n", - " \"fidelity_param\": neps.HPOInteger(1, 10,is_fidelity=True)\n", - "})\n", - "pipeline = SimpleSpace()\n", - "converted_space = 
neps.space.neps_spaces.neps_space.convert_classic_to_neps_search_space(old_space)\n", - "\n", - "for name in converted_space.get_attrs().keys():\n", - " param = converted_space.get_attrs()[name]\n", - " print(\"\\n Converted\",name)\n", - " if isinstance(param, neps.Fidelity):\n", - " print(param._domain.get_attrs())\n", - " else:\n", - " pprint(param.get_attrs())\n", - "\n", - " param = pipeline.get_attrs()[name]\n", - " print(\"\\n Pipeline\",name)\n", - " if isinstance(param, neps.Fidelity):\n", - " print(param._domain.get_attrs())\n", - " else:\n", - " pprint(param.get_attrs())\n", - "\n", - "print(pipeline)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "59280930", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:neps.api:Starting neps.run using root directory results/fidelity_ignore_test\n", - "INFO:neps.runtime:Overwriting optimization directory 'results\\fidelity_ignore_test' as `overwrite_optimization_dir=True`.\n", - "INFO:neps.runtime:Summary files can be found in the “summary” folder inside the root directory: C:\\Users\\Amega\\Git\\neps\\neps_examples\\test_files\\results\\fidelity_ignore_test\\summary\n", - "INFO:neps.runtime:Using optimizer: neps_priorband\n", - "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 1_rung_0.\n", - "INFO:neps.state.pipeline_eval:Successful evaluation of '1_rung_0': 120.\n", - "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 1_rung_0 as State.SUCCESS.\n", - "INFO:neps.runtime:New best: trial 1_rung_0 with objective 120.0\n", - "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 2_rung_0.\n", - "INFO:neps.state.pipeline_eval:Successful evaluation of '2_rung_0': 104.\n", - "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 2_rung_0 as State.SUCCESS.\n", - "INFO:neps.runtime:New best: trial 2_rung_0 with objective 104.0\n", - "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 3_rung_0.\n", - 
"INFO:neps.state.pipeline_eval:Successful evaluation of '3_rung_0': 51.\n", - "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 3_rung_0 as State.SUCCESS.\n", - "INFO:neps.runtime:New best: trial 3_rung_0 with objective 51.0\n", - "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 4_rung_0.\n", - "INFO:neps.state.pipeline_eval:Successful evaluation of '4_rung_0': 88.\n", - "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 4_rung_0 as State.SUCCESS.\n", - "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 5_rung_0.\n", - "INFO:neps.state.pipeline_eval:Successful evaluation of '5_rung_0': 129.\n", - "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 5_rung_0 as State.SUCCESS.\n", - "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 6_rung_0.\n", - "INFO:neps.state.pipeline_eval:Successful evaluation of '6_rung_0': 118.\n", - "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 6_rung_0 as State.SUCCESS.\n", - "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 7_rung_0.\n", - "INFO:neps.state.pipeline_eval:Successful evaluation of '7_rung_0': 30.\n", - "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 7_rung_0 as State.SUCCESS.\n", - "INFO:neps.runtime:New best: trial 7_rung_0 with objective 30.0\n", - "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 8_rung_0.\n", - "INFO:neps.state.pipeline_eval:Successful evaluation of '8_rung_0': 109.\n", - "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 8_rung_0 as State.SUCCESS.\n", - "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 9_rung_0.\n", - "INFO:neps.state.pipeline_eval:Successful evaluation of '9_rung_0': 148.\n", - "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 9_rung_0 as State.SUCCESS.\n", - "INFO:neps.runtime:Worker 'worker_0' sampled new trial: 7_rung_1.\n", - "INFO:neps.state.pipeline_eval:Successful evaluation of '7_rung_1': 32.\n", - "INFO:neps.runtime:Worker 'worker_0' evaluated trial: 7_rung_1 as State.SUCCESS.\n", - "INFO:neps.runtime:The total 
number of fidelity evaluations has reached the maximum allowed of `self.settings.fidelities_to_spend=10`. To allow more evaluations, increase this value or use a different stopping criterion.\n", - "INFO:neps.api:The summary folder has been created, which contains csv and txt files withthe output of all data in the run (short.csv - only the best; full.csv - all runs; best_config_trajectory.txt for incumbent trajectory; and best_config.txt for final incumbent).\n", - "You can find summary folder at: results\\fidelity_ignore_test\\summary.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "# Configs: 10\n", - "\n", - " success: 10\n", - "\n", - "# Best Found (config 7_rung_0):\n", - "\n", - " objective_to_minimize: 30.0\n", - " config: int_param1\n", - " 20\n", - " \t01 :: 20\n", - " config: fidelity_param\n", - " 1\n", - " \t01 :: 1\n", - " config: categorical_param\n", - " b\n", - " \t01 :: b\n", - " config: operation\n", - " Conv2D(7)\n", - " \t01 :: Conv2D\n", - " config: operation2\n", - " Sequential(Conv2d(3, 16, 3))\n", - " \t01 :: Sequential\n", - " \t\t02 :: Conv2d\n", - " path: C:\\Users\\Amega\\Git\\neps\\neps_examples\\test_files\\results\\fidelity_ignore_test\\configs\\config_7_rung_0\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import torch\n", - "import torch.nn as nn\n", - "import neps\n", - "from neps.space.neps_spaces.parameters import PipelineSpace, Operation, Categorical, Resampled\n", - "from neps.space.neps_spaces import sampling\n", - "from neps.space.neps_spaces import neps_space\n", - "from functools import partial\n", - "\n", - "# Define the NEPS space for the neural network architecture\n", - "class SimpleSpace(PipelineSpace):\n", - " int_param1 = neps.Integer(1,100)#, prior=50, prior_confidence=\"low\")\n", - " int_param2 = neps.Integer(1,100, prior=50, prior_confidence=\"medium\")\n", - " int_param3 = neps.Integer(1,100, prior=50, prior_confidence=\"high\")\n", - " int_param4 = neps.Integer(1,3, 
prior=2, prior_confidence=\"low\")\n", - " categorical_param = Categorical((\"a\", \"b\", int_param1))\n", - " float_param = neps.Float(0, 1.0, prior=0.5, prior_confidence=\"high\")\n", - " fidelity_param = neps.Fidelity(neps.Integer(1, 10))\n", - " operation = neps.Operation(\n", - " \"Conv2D\",\n", - " args=neps.Resampled(int_param1),\n", - " kwargs={\n", - " \"kernel_size\": neps.Resampled(int_param4),\n", - " }\n", - " )\n", - " conv = neps.Operation(\n", - " nn.Conv2d,\n", - " args=(3,16,3),\n", - " kwargs={\n", - " \"stride\": neps.Resampled(int_param4),\n", - " }\n", - " )\n", - " operation2 = neps.Operation(\n", - " nn.Sequential,\n", - " args=(neps.Resampled(conv),),\n", - " )\n", - "\n", - "# Sampling and printing one random configuration of the pipeline\n", - "pipeline = SimpleSpace()\n", - "\n", - "def evaluate_pipeline(int_param1, int_param2, fidelity_param, categorical_param, **kwargs):\n", - " # print(kwargs)\n", - " return int_param1 + int_param2 + fidelity_param\n", - "\n", - "for i in range(1):\n", - " # resolved_pipeline, resolution_context = neps_space.resolve(pipeline,domain_sampler=sampler)\n", - " new_rs=neps.algorithms.NePSRandomSearch(pipeline,ignore_fidelity=True)\n", - " # old_rs=neps.algorithms.random_search(pipeline,ignore_fidelity=True)\n", - " # print(new_rs({},None))\n", - "\n", - " # s = resolved_pipeline.int_param1\n", - " # print(resolved_pipeline.get_attrs())\n", - " import logging\n", - "\n", - " logging.basicConfig(level=logging.INFO)\n", - " neps.run(\n", - " evaluate_pipeline,\n", - " pipeline,\n", - " root_directory=\"results/fidelity_ignore_test\",\n", - " overwrite_root_directory=True,\n", - " optimizer=neps.algorithms.neps_priorband,\n", - " fidelities_to_spend=10\n", - " )\n", - " neps.status(\"results/fidelity_ignore_test\",print_summary=True, pipeline_space_variables=(SimpleSpace(),[\"int_param1\", \"fidelity_param\", \"categorical_param\", \"operation\", \"operation2\"]))" - ] - }, - { - "cell_type": "code", - 
"execution_count": 4, - "id": "94af7ec4", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "('D',)\n", - "('D',)\n" - ] - } - ], - "source": [ - "from neps.space.neps_spaces import parameters, sampling, neps_space\n", - "\n", - "class TestSpace(parameters.PipelineSpace):\n", - " cat_var = parameters.Categorical((\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\"))\n", - " cat_var_choice_2 = parameters.Categorical(\n", - " (\n", - " (\n", - " parameters.Resampled(cat_var),\n", - " ),\n", - " )\n", - " )\n", - " reresampled_var = parameters.Resampled(cat_var_choice_2)\n", - " reresampled_var2 = parameters.Resampled(cat_var_choice_2)\n", - "\n", - "random_sampler = sampling.RandomSampler({})\n", - "sampler = sampling.PriorOrFallbackSampler(fallback_sampler=random_sampler, always_use_prior=False)\n", - "\n", - "resolved_pipeline, resolution_context = neps_space.resolve(TestSpace(),domain_sampler=random_sampler)\n", - "print(resolved_pipeline.reresampled_var)\n", - "print(resolved_pipeline.reresampled_var2)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "neural-pipeline-search (3.13.1)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.1" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/tests/test_examples.py b/tests/test_examples.py index 5510c084e..1b3e0949a 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -8,6 +8,8 @@ import pytest from neps_examples import ci_examples, core_examples +from neps.exceptions import WorkerFailedToGetPendingTrialsError + @pytest.fixture(autouse=True) def use_tmpdir(tmp_path, request): @@ -47,7 +49,29 @@ def test_core_examples(example): ): pytest.xfail("Architecture were removed 
temporarily") - runpy.run_path(str(example), run_name="__main__") + # pytorch_nn_example has a known recursion issue in resolution + if example.name == "pytorch_nn_example.py": + try: + runpy.run_path(str(example), run_name="__main__") + except (RecursionError, WorkerFailedToGetPendingTrialsError) as e: + # RecursionError occurs during resolution of nested structures + # WorkerFailedToGetPendingTrialsError occurs when RecursionError repeats + # This is a known bug that should be fixed, so we use xfail instead of skip + error_str = str(e) + cause_str = str(e.__cause__) if e.__cause__ else "" + if ( + "RecursionError" in error_str + or "maximum recursion depth" in error_str + or "maximum recursion depth" in cause_str + ): + pytest.xfail( + "Known RecursionError bug in nested structure resolution:" + f" {type(e).__name__}" + ) + # If it's a different error, fail the test + raise + else: + runpy.run_path(str(example), run_name="__main__") @pytest.mark.ci_examples From 5d79e1f8239b61ddc4ed4fdc8001fe577f632eec Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 3 Nov 2025 23:11:12 +0100 Subject: [PATCH 106/156] Enhance NEPS API and examples: add create_config function, improve parameter handling, and update example scripts for better usability --- neps/__init__.py | 7 +- neps/api.py | 31 ++++- neps/normalization.py | 42 +++--- neps/optimizers/algorithms.py | 6 +- neps/optimizers/neps_random_search.py | 8 +- neps/space/neps_spaces/neps_space.py | 28 ++-- neps/space/neps_spaces/parameters.py | 14 +- neps/space/neps_spaces/sampling.py | 125 +++++++++++++++++- .../basic_usage/pytorch_nn_example.py | 2 +- .../convenience/async_evaluation/submit.py | 2 +- neps_examples/convenience/config_creation.py | 35 +++++ .../import_trials.py} | 0 .../experimental/ask_and_tell_example.py | 2 +- pyproject.toml | 4 + ...test_space_conversion_and_compatibility.py | 2 +- 15 files changed, 242 insertions(+), 66 deletions(-) create mode 100644 neps_examples/convenience/config_creation.py rename 
neps_examples/{basic_usage/example_import_trials.py => convenience/import_trials.py} (100%) diff --git a/neps/__init__.py b/neps/__init__.py index 71c12258e..a332fbbcf 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -5,7 +5,7 @@ and algorithms. """ -from neps.api import import_trials, run, save_pipeline_results +from neps.api import create_config, import_trials, run, save_pipeline_results from neps.optimizers import algorithms from neps.optimizers.ask_and_tell import AskAndTell from neps.optimizers.optimizer import SampledConfig @@ -25,7 +25,7 @@ from neps.state import BudgetInfo, Trial from neps.state.pipeline_eval import UserResultDict from neps.status.status import status -from neps.utils.files import load_and_merge_yamls as load_yamls +from neps.utils.files import load_and_merge_yamls __all__ = [ "AskAndTell", @@ -47,8 +47,9 @@ "Trial", "UserResultDict", "algorithms", + "create_config", "import_trials", - "load_yamls", + "load_and_merge_yamls", "plot", "run", "save_pipeline_results", diff --git a/neps/api.py b/neps/api.py index a5ad10471..35750a2c9 100644 --- a/neps/api.py +++ b/neps/api.py @@ -17,6 +17,7 @@ check_neps_space_compatibility, convert_classic_to_neps_search_space, convert_neps_to_classic_search_space, + resolve, ) from neps.space.neps_spaces.parameters import PipelineSpace from neps.space.parsing import convert_to_space @@ -406,12 +407,6 @@ def __call__( "'priorband', or 'complex_random_search'." ) - if isinstance(space, PipelineSpace): - assert not isinstance(evaluate_pipeline, str) - evaluate_pipeline = adjust_evaluation_pipeline_for_neps_space( - evaluate_pipeline, space - ) - _optimizer_ask, _optimizer_info = load_optimizer(optimizer=optimizer, space=space) multi_fidelity_optimizers = { @@ -458,6 +453,8 @@ def __call__( "evaluate_pipeline must be a callable or a string in the format" "'module:function'." 
) + if isinstance(space, PipelineSpace): + _eval = adjust_evaluation_pipeline_for_neps_space(_eval, space) _launch_runtime( evaluation_fn=_eval, # type: ignore @@ -657,4 +654,24 @@ def import_trials( state.lock_and_import_trials(imported_trials, worker_id="external") -__all__ = ["import_trials", "run", "save_pipeline_results"] +def create_config( + pipeline_space: PipelineSpace, +) -> tuple[Mapping[str, Any], PipelineSpace]: + """Create a configuration by prompting the user for input. + + Args: + pipeline_space: The pipeline search space to create a configuration for. + + Returns: + A tuple containing the created configuration dictionary and the sampled pipeline. + """ + from neps.space.neps_spaces.neps_space import NepsCompatConverter + from neps.space.neps_spaces.sampling import IOSampler + + resolved_pipeline, resolution_context = resolve( + pipeline_space, domain_sampler=IOSampler() + ) + return NepsCompatConverter.to_neps_config(resolution_context), resolved_pipeline + + +__all__ = ["create_config", "import_trials", "run", "save_pipeline_results"] diff --git a/neps/normalization.py b/neps/normalization.py index 9d827c413..3e8ff3f1f 100644 --- a/neps/normalization.py +++ b/neps/normalization.py @@ -28,6 +28,12 @@ def _normalize_imported_config( # noqa: C901, PLR0912 """ if isinstance(space, SearchSpace): all_param_keys = set(space.searchables.keys()) | set(space.fidelities.keys()) + # copy to avoid modifying the original config + normalized_conf = dict(config) + for key, param in space.fidelities.items(): + if key not in normalized_conf: + normalized_conf[key] = param.upper + extra_keys = set(normalized_conf.keys()) - all_param_keys else: # For PipelineSpace, we need to generate the prefixed keys # Import here to avoid circular import @@ -57,38 +63,28 @@ def _normalize_imported_config( # noqa: C901, PLR0912 f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" ) - # copy to avoid modifying the original config - normalized_conf = dict(config) - - 
fidelities = ( - space.fidelities if isinstance(space, SearchSpace) else space.fidelity_attrs - ) - for key, param in fidelities.items(): - if key not in normalized_conf: - normalized_conf[key] = param.upper + # copy to avoid modifying the original config + normalized_conf = dict(config) - if isinstance(space, SearchSpace): - extra_keys = set(normalized_conf.keys()) - all_param_keys - else: + for key, fid_param in space.fidelity_attrs.items(): + if key not in normalized_conf: + normalized_conf[NepsCompatConverter._ENVIRONMENT_PREFIX + key] = ( + fid_param.upper + ) # For PipelineSpace, filter out keys that match the expected patterns # Import here to avoid circular import (needed for prefix constants) from neps.space.neps_spaces.neps_space import NepsCompatConverter extra_keys = set() for key in normalized_conf: - if not ( - key.startswith( - ( - NepsCompatConverter._SAMPLING_PREFIX, - NepsCompatConverter._ENVIRONMENT_PREFIX, - ) + if not key.startswith( + ( + NepsCompatConverter._SAMPLING_PREFIX, + NepsCompatConverter._ENVIRONMENT_PREFIX, ) ): - # Check if it's a plain parameter name (without prefix) - if key not in space.get_attrs() and key not in space.fidelity_attrs: - extra_keys.add(key) - elif key not in all_param_keys: - # It has a prefix but doesn't match expected sampling/environment keys + # It has no prefix. + # TODO: It might still be unnecessary, but it will not hurt. 
extra_keys.add(key) if extra_keys: diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index e9dad13ab..1e404539b 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -46,7 +46,10 @@ from neps.optimizers.random_search import RandomSearch from neps.sampling import Prior, Sampler, Uniform from neps.space.encoding import CategoricalToUnitNorm, ConfigEncoder -from neps.space.neps_spaces.neps_space import convert_neps_to_classic_search_space +from neps.space.neps_spaces.neps_space import ( + NepsCompatConverter, + convert_neps_to_classic_search_space, +) from neps.space.neps_spaces.parameters import ( Categorical, Float, @@ -1662,6 +1665,7 @@ def _neps_bracket_optimizer( ) fidelity_name, fidelity_obj = next(iter(fidelity_attrs.items())) + fidelity_name = NepsCompatConverter._ENVIRONMENT_PREFIX + fidelity_name if sample_prior_first not in (True, False, "highest_fidelity"): raise ValueError( diff --git a/neps/optimizers/neps_random_search.py b/neps/optimizers/neps_random_search.py index cb9939994..afc49c9e2 100644 --- a/neps/optimizers/neps_random_search.py +++ b/neps/optimizers/neps_random_search.py @@ -69,13 +69,13 @@ def __init__( " fidelity`." ) # Sample randomly from the fidelity bounds. - elif isinstance(fidelity_obj._domain, Integer): + elif isinstance(fidelity_obj.domain, Integer): assert isinstance(fidelity_obj.lower, int) assert isinstance(fidelity_obj.upper, int) self._environment_values[fidelity_name] = random.randint( fidelity_obj.lower, fidelity_obj.upper ) - elif isinstance(fidelity_obj._domain, Float): + elif isinstance(fidelity_obj.domain, Float): self._environment_values[fidelity_name] = random.uniform( fidelity_obj.lower, fidelity_obj.upper ) @@ -176,13 +176,13 @@ def __init__( " `highest fidelity`." ) # Sample randomly from the fidelity bounds. 
- elif isinstance(fidelity_obj._domain, Integer): + elif isinstance(fidelity_obj.domain, Integer): assert isinstance(fidelity_obj.lower, int) assert isinstance(fidelity_obj.upper, int) self._environment_values[fidelity_name] = random.randint( fidelity_obj.lower, fidelity_obj.upper ) - elif isinstance(fidelity_obj._domain, Float): + elif isinstance(fidelity_obj.domain, Float): self._environment_values[fidelity_name] = random.uniform( fidelity_obj.lower, fidelity_obj.upper ) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 1f1fe01ae..542af2503 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -30,6 +30,7 @@ ) from neps.space.neps_spaces.sampling import ( DomainSampler, + IOSampler, OnlyPredefinedValuesSampler, RandomSampler, ) @@ -37,6 +38,7 @@ if TYPE_CHECKING: from neps.space import SearchSpace + from neps.state.pipeline_eval import EvaluatePipelineReturn P = TypeVar("P", bound="PipelineSpace") @@ -858,6 +860,8 @@ def resolve( if environment_values is None: # By default, have no environment values. environment_values = {} + if isinstance(domain_sampler, IOSampler): + environment_values = domain_sampler.sample_environment_values(pipeline) sampling_resolver = SamplingResolver() resolved_pipeline, context = sampling_resolver( @@ -1146,10 +1150,10 @@ def _prepare_sampled_configs( def adjust_evaluation_pipeline_for_neps_space( - evaluation_pipeline: Callable, + evaluation_pipeline: Callable[..., EvaluatePipelineReturn], pipeline_space: P, operation_converter: Callable[[Operation], Any] = convert_operation_to_callable, -) -> Callable | str: +) -> Callable: """Adjust the evaluation pipeline to work with a NePS space. This function wraps the evaluation pipeline to sample from the NePS space and convert the sampled pipeline to a format compatible with the evaluation pipeline. 
@@ -1268,24 +1272,24 @@ def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | ), ) elif isinstance(value, Fidelity): - if isinstance(value._domain, Integer): + if isinstance(value.domain, Integer): classic_space[key] = neps.HPOInteger( - lower=value._domain.lower, - upper=value._domain.upper, + lower=value.domain.lower, + upper=value.domain.upper, log=( - value._domain._log - if hasattr(value._domain, "_log") + value.domain._log + if hasattr(value.domain, "_log") else False ), is_fidelity=True, ) - elif isinstance(value._domain, Float): + elif isinstance(value.domain, Float): classic_space[key] = neps.HPOFloat( - lower=value._domain.lower, - upper=value._domain.upper, + lower=value.domain.lower, + upper=value.domain.upper, log=( - value._domain._log - if hasattr(value._domain, "_log") + value.domain._log + if hasattr(value.domain, "_log") else False ), is_fidelity=True, diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index eda6d10e5..21a81ce5b 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -112,11 +112,11 @@ def __init__(self, domain: Integer | Float): "The domain of a Fidelity can not have priors, has prior:" f" {domain.prior!r}." ) - self._domain = domain + self.domain = domain def __str__(self) -> str: """Get a string representation of the fidelity.""" - return f"Fidelity({self._domain.__str__()})" + return f"Fidelity({self.domain.__str__()})" def compare_domain_to(self, other: object) -> bool: """Check if this fidelity parameter is equivalent to another. @@ -133,7 +133,7 @@ def compare_domain_to(self, other: object) -> bool: """ if not isinstance(other, Fidelity): return False - return self._domain == other._domain + return self.domain == other.domain @property def lower(self) -> int | float: @@ -142,7 +142,7 @@ def lower(self) -> int | float: Returns: The minimum value of the fidelity domain. 
""" - return self._domain.lower + return self.domain.lower @property def upper(self) -> int | float: @@ -151,7 +151,7 @@ def upper(self) -> int | float: Returns: The maximum value of the fidelity domain. """ - return self._domain.upper + return self.domain.upper def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the fidelity as a mapping. @@ -252,7 +252,7 @@ def __str__(self) -> str: for k, v in self.get_attrs().items() if not k.startswith("_") and not callable(v) ) - return f"PipelineSpace {self.__class__.__name__} with parameters:\n\t{attrs}" + return f"{self.__class__.__name__} with parameters:\n\t{attrs}" def add( self, @@ -652,7 +652,7 @@ def __init__( def __str__(self) -> str: """Get a string representation of the categorical domain.""" - string = f"Categorical(choices={self._choices!s}" + string = f"Categorical(choices={tuple([str(choice) for choice in self.choices])})" # type: ignore[union-attr] if self.has_prior: string += f", prior={self._prior}, prior_confidence={self._prior_confidence}" string += ")" diff --git a/neps/space/neps_spaces/sampling.py b/neps/space/neps_spaces/sampling.py index 0368e2b89..fbc1f00d4 100644 --- a/neps/space/neps_spaces/sampling.py +++ b/neps/space/neps_spaces/sampling.py @@ -19,6 +19,7 @@ Integer, PipelineSpace, ) +from neps.validation import validate_parameter_value T = TypeVar("T") P = TypeVar("P", bound="PipelineSpace") @@ -91,7 +92,7 @@ def __call__( """ if current_path not in self._predefined_samplings: raise ValueError(f"No predefined value for path: {current_path!r}.") - return cast(T, self._predefined_samplings[current_path]) + return cast("T", self._predefined_samplings[current_path]) class RandomSampler(DomainSampler): @@ -141,10 +142,124 @@ def __call__( if current_path not in self._predefined_samplings: sampled_value = domain_obj.sample() else: - sampled_value = cast(T, self._predefined_samplings[current_path]) + sampled_value = cast("T", self._predefined_samplings[current_path]) return 
sampled_value +class IOSampler(DomainSampler): + """A sampler that samples by asking the user at each decision.""" + + def __call__( + self, + *, + domain_obj: Domain[T], + current_path: str, + ) -> T: + """Sample a value from the predefined samplings or the domain. + + Args: + domain_obj: The domain object from which to sample. + current_path: The current path in the search space. + + Returns: + A value from the user input. + """ + if isinstance(domain_obj, Float | Integer): + print( + "Please provide" + f" {'a float' if isinstance(domain_obj, Float) else 'an integer'} value" + f" for \n\t'{current_path}'\nin the range [{domain_obj.lower}," # type: ignore[attr-defined] + f" {domain_obj.upper}]: ", # type: ignore[attr-defined] + ) + elif isinstance(domain_obj, Categorical): + choices_list = "\n\t".join( + f"{n}: {choice!s}" + for n, choice in enumerate(domain_obj.choices) # type: ignore[attr-defined, arg-type] + ) + max_index = int(domain_obj.range_compatibility_identifier) - 1 # type: ignore[attr-defined] + print( + f"Please provide an index for '{current_path}'\n" + f"Choices:\n\t{choices_list}\n" + f"Valid range: [0, {max_index}]: " + ) + + while True: + sampled_value: str | int | float = input() + try: + if isinstance(domain_obj, Integer): + sampled_value = int(sampled_value) + elif isinstance(domain_obj, Float): + sampled_value = float(sampled_value) + elif isinstance(domain_obj, Categorical): + sampled_value = int(sampled_value) + else: + raise ValueError( + f"Unsupported domain type: {type(domain_obj).__name__}" + ) + + assert isinstance(domain_obj, Float | Integer | Categorical) + + if validate_parameter_value(domain_obj, sampled_value): + print(f"Value {sampled_value} recorded.\n") + break + else: + print( + f"Invalid value '{sampled_value}' for domain '{current_path}'. " + "Please try again: ", + ) + except ValueError: + print( + f"Could not convert input '{sampled_value}' to the required type. 
" + "Please try again: ", + ) + + return cast("T", sampled_value) + + def sample_environment_values(self, pipeline_space: P) -> Mapping[str, Any]: + """Get the environment values for the sampler. + + Returns: + The interactively chosen environment values. + """ + environment_values = {} + for fidelity_name, fidelity_object in pipeline_space.fidelity_attrs.items(): + domain_obj = fidelity_object.domain + print( + "Please provide" + f" {'a float' if isinstance(domain_obj, Float) else 'an integer'} value" + f" for the Fidelity '{fidelity_name}' in the range" + f" [{domain_obj.lower}, {domain_obj.upper}]: ", + ) + while True: + sampled_value: str | int | float = input() + try: + if isinstance(domain_obj, Integer): + sampled_value = int(sampled_value) + elif isinstance(domain_obj, Float): + sampled_value = float(sampled_value) + else: + raise ValueError( + f"Unsupported domain type: {type(domain_obj).__name__}" + ) + + if validate_parameter_value(domain_obj, sampled_value): + print(f"Value {sampled_value} recorded.\n") + break + else: + print( + f"Invalid value '{sampled_value}' for Fidelity" + f" '{fidelity_object!s}'. Please try again: ", + ) + except ValueError: + print( + f"Could not convert input '{sampled_value}' to the required type." + " Please try again: ", + ) + environment_values[fidelity_name] = sampled_value + + return environment_values + + class PriorOrFallbackSampler(DomainSampler): """A sampler that uses a prior value if available, otherwise falls back to another sampler. @@ -228,10 +343,10 @@ def __call__( scale=std_dev, ) if isinstance(domain_obj, Integer): - sampled_value = int(round(sampled_value)) + sampled_value = round(sampled_value) else: sampled_value = float(sampled_value) # type: ignore - return cast(T, sampled_value) + return cast("T", sampled_value) return self._fallback_sampler( domain_obj=domain_obj, @@ -410,7 +525,7 @@ def __call__( sampled_value = domain_obj.sample() else: # For this path we have chosen to keep the original value. 
- sampled_value = cast(T, self._kept_samplings_to_make[current_path]) + sampled_value = cast("T", self._kept_samplings_to_make[current_path]) return sampled_value diff --git a/neps_examples/basic_usage/pytorch_nn_example.py b/neps_examples/basic_usage/pytorch_nn_example.py index 41242df56..4bf4022a7 100644 --- a/neps_examples/basic_usage/pytorch_nn_example.py +++ b/neps_examples/basic_usage/pytorch_nn_example.py @@ -11,7 +11,7 @@ import torch import torch.nn as nn import neps -from neps.space.neps_spaces.parameters import ( +from neps import ( PipelineSpace, Operation, Categorical, diff --git a/neps_examples/convenience/async_evaluation/submit.py b/neps_examples/convenience/async_evaluation/submit.py index 7bb9c6d4b..f6b99cde0 100644 --- a/neps_examples/convenience/async_evaluation/submit.py +++ b/neps_examples/convenience/async_evaluation/submit.py @@ -41,6 +41,6 @@ class ExampleSpace(neps.PipelineSpace): neps.run( evaluate_pipeline=evaluate_pipeline_via_slurm, pipeline_space=ExampleSpace(), - root_directory="results", + root_directory="results/async_evaluation", max_evaluations_per_run=2, ) diff --git a/neps_examples/convenience/config_creation.py b/neps_examples/convenience/config_creation.py new file mode 100644 index 000000000..b663d873b --- /dev/null +++ b/neps_examples/convenience/config_creation.py @@ -0,0 +1,35 @@ +"""How to create a NePS configuration manually which can then be used as imported trial.""" + +import neps +from pprint import pprint +import logging + + +# This example space demonstrates all types of parameters available in NePS. 
+class ExampleSpace(neps.PipelineSpace):
+    int1 = neps.Fidelity(neps.Integer(1, 10))
+    float1 = neps.Float(0.0, 1.0)
+    cat1 = neps.Categorical(["a", "b", "c"])
+    cat2 = neps.Categorical(["x", "y", float1])
+    cat4 = neps.Categorical([neps.Resampled(cat2), neps.Resampled(cat1)])
+
+
+if __name__ == "__main__":
+    # We create a configuration interactively and receive both
+    # the configuration dictionary and the corresponding pipeline.
+    config, pipeline = neps.create_config(ExampleSpace())
+    print("Created configuration:")
+    pprint(config)
+    print("Sampled pipeline:")
+    print(pipeline, "\n")
+    # We can access the sampled values via e.g. pipeline.int1
+
+    logging.basicConfig(level=logging.INFO)
+    # The created configuration can then be used as an imported trial in NePS optimizers.
+    # We demonstrate this with the fictional result of objective_to_minimize = 0.5
+    neps.import_trials(
+        ExampleSpace(),
+        [(config, neps.UserResultDict(objective_to_minimize=0.5))],
+        root_directory="results/created_config_example",
+        overwrite_root_directory=True,
+    )
diff --git a/neps_examples/basic_usage/example_import_trials.py b/neps_examples/convenience/import_trials.py
similarity index 100%
rename from neps_examples/basic_usage/example_import_trials.py
rename to neps_examples/convenience/import_trials.py
diff --git a/neps_examples/experimental/ask_and_tell_example.py b/neps_examples/experimental/ask_and_tell_example.py
index d8f9dd325..afe1bd792 100644
--- a/neps_examples/experimental/ask_and_tell_example.py
+++ b/neps_examples/experimental/ask_and_tell_example.py
@@ -140,7 +140,7 @@ class MySpace(neps.PipelineSpace):
         help="Number of trials to evaluate in parallel initially"
     )
     parser.add_argument(
-        "--results-dir", type=Path, default=Path("results"),
+        "--results-dir", type=Path, default=Path("results/ask_and_tell"),
         help="Path to save the results inside"
     )
     args = parser.parse_args()
diff --git a/pyproject.toml b/pyproject.toml
index a85c35afb..aeece733d 100644
--- a/pyproject.toml
+++ b/pyproject.toml @@ -260,6 +260,10 @@ ignore = [ "E501", ] "docs/*" = ["INP001"] +"neps/space/neps_spaces/sampling.py" = [ + "T201", # print() is intentional for IOSampler user interaction + "RET508", # else after break is needed for error messages +] # TODO "neps/optimizers/**.py" = [ "D", # Documentation of everything diff --git a/tests/test_neps_space/test_space_conversion_and_compatibility.py b/tests/test_neps_space/test_space_conversion_and_compatibility.py index be908859f..1381db389 100644 --- a/tests/test_neps_space/test_space_conversion_and_compatibility.py +++ b/tests/test_neps_space/test_space_conversion_and_compatibility.py @@ -116,7 +116,7 @@ def test_convert_classic_to_neps(): assert neps_attrs["cat_param"].prior_confidence == ConfidenceLevel.LOW assert isinstance(neps_attrs["fidelity_param"], Fidelity) - assert isinstance(neps_attrs["fidelity_param"]._domain, Integer) + assert isinstance(neps_attrs["fidelity_param"].domain, Integer) # Constant should be preserved as-is assert neps_attrs["constant_param"] == "constant_value" From 7b9009d42c773f0a884e97562b9932ef8d6e90e8 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 4 Nov 2025 12:10:31 +0100 Subject: [PATCH 107/156] fix: update string representation test to reflect BasicSpace instead of PipelineSpace --- tests/test_neps_space/test_pipeline_space_methods.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_neps_space/test_pipeline_space_methods.py b/tests/test_neps_space/test_pipeline_space_methods.py index 3dbf58001..c91b1859a 100644 --- a/tests/test_neps_space/test_pipeline_space_methods.py +++ b/tests/test_neps_space/test_pipeline_space_methods.py @@ -389,6 +389,6 @@ def test_space_string_representation(): # Should be able to get string representation without error str_repr = str(modified_space) - assert "PipelineSpace" in str_repr + assert "BasicSpace" in str_repr assert "added_param" in str_repr assert "y" not in str_repr # Should be removed From 
113379df9e11ead773cbe082a10a399d1ecc1e54 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 4 Nov 2025 17:00:03 +0100 Subject: [PATCH 108/156] fix: improve string representation for Categorical and Operation classes --- neps/space/neps_spaces/parameters.py | 42 +++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 21a81ce5b..313a1eff7 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -652,9 +652,19 @@ def __init__( def __str__(self) -> str: """Get a string representation of the categorical domain.""" - string = f"Categorical(choices={tuple([str(choice) for choice in self.choices])})" # type: ignore[union-attr] + str_choices = [ + ( + choice.__name__ # type: ignore[union-attr] + if (callable(choice) and not isinstance(choice, Resolvable)) + else str(choice) + ) + for choice in self.choices # type: ignore[union-attr] + ] + string = f"Categorical(choices = ({', '.join(str_choices)}))" if self.has_prior: - string += f", prior={self._prior}, prior_confidence={self._prior_confidence}" + string += ( + f", prior = {self._prior}, prior_confidence = {self._prior_confidence}" + ) string += ")" return string @@ -1288,9 +1298,33 @@ def __init__( def __str__(self) -> str: """Get a string representation of the operation.""" + if self._args != (): + args_str = ", args = " + if isinstance(self._args, Resolvable): + args_str = str(self._args) + else: + args_str = "(" + ", ".join(str(arg) for arg in self._args) + ")" + else: + args_str = "" + + if self._kwargs != {}: + kwargs_str = ", kwargs = " + if isinstance(self._kwargs, Resolvable): + kwargs_str += str(self._kwargs) + else: + kwargs_str += ( + "{" + ", ".join(f"{k}={v!s}" for k, v in self._kwargs.items()) + "}" + ) + else: + kwargs_str = "" + return ( - f"Operation(operator={self._operator!s}, args={self._args!s}," - f" kwargs={self._kwargs!s})" + "Operation(operator 
=" + f" { + self._operator + if isinstance(self._operator, str) + else self._operator.__name__ + }{args_str}{kwargs_str})" ) def compare_domain_to(self, other: object) -> bool: From 92ea9ceeff5ca9baae5f7bbacb4f9f0c93e59a85 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 4 Nov 2025 17:46:27 +0100 Subject: [PATCH 109/156] feat: add load_config function and update utility imports for better configuration handling --- docs/reference/neps_spaces.md | 19 +++-------- neps/__init__.py | 6 +++- neps/api.py | 60 ++++++++++++++++++++++++++++++++++- neps/utils/__init__.py | 6 ++++ 4 files changed, 74 insertions(+), 17 deletions(-) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 323dd5526..4eaeafe75 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -214,26 +214,15 @@ def evaluate_pipeline(cnn: torch.nn.Module): ## Inspecting Configurations -NePS saves the configurations as paths, where each sampling decision is recorded. As they are hard to read, so you can load the configuration from the `results/.../configs` directory using the [`NepsCompatConverter`][neps.space.neps_spaces.neps_space.NepsCompatConverter] class, which converts the configuration such that it can be used with the NePS Spaces API: +NePS saves the configurations as paths, where each sampling decision is recorded. As they are hard to read, so you can load the configuration using `neps.load_config()`, which returns the resolved pipeline. 
You can print it, or convert specific operations to strings or callables using the utility functions `neps.convert_operation_to_string()` and `neps.convert_operation_to_callable()`: ```python -from neps.space.neps_spaces import neps_space -import yaml - -with open("Path/to/config.yaml", "r") as f: - conf_dict = yaml.safe_load(f) -config = NepsCompatConverter.from_neps_config(conf_dict) +import neps -# Use the resolution context to sample the configuration using a -# Sampler that follows the instructions in the configuration -resolved_pipeline, resolution_context = neps_space.resolve(pipeline=NN_Space(), - # Predefined samplings are the decisions made at each sampling step - domain_sampler=neps_space.OnlyPredefinedValuesSampler(predefined_samplings=config.predefined_samplings), - # Environment values are the fidelities and any arguments of the evaluation function not part of the search space - environment_values=config.environment_values) +pipeline = neps.load_config("Path/to/config.yaml", pipeline_space=SimpleSpace()) # The resolved_pipeline now contains all the parameters and their values, e.g. the Callable model -model_callable = neps_space.convert_operation_to_callable(operation=resolved_pipeline.model) +model_callable = neps.convert_operation_to_callable(resolved_pipeline.model) ``` ## Using ConfigSpace diff --git a/neps/__init__.py b/neps/__init__.py index a332fbbcf..441717a8f 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -5,7 +5,7 @@ and algorithms. 
""" -from neps.api import create_config, import_trials, run, save_pipeline_results +from neps.api import create_config, import_trials, load_config, run, save_pipeline_results from neps.optimizers import algorithms from neps.optimizers.ask_and_tell import AskAndTell from neps.optimizers.optimizer import SampledConfig @@ -25,6 +25,7 @@ from neps.state import BudgetInfo, Trial from neps.state.pipeline_eval import UserResultDict from neps.status.status import status +from neps.utils import convert_operation_to_callable, convert_operation_to_string from neps.utils.files import load_and_merge_yamls __all__ = [ @@ -47,9 +48,12 @@ "Trial", "UserResultDict", "algorithms", + "convert_operation_to_callable", + "convert_operation_to_string", "create_config", "import_trials", "load_and_merge_yamls", + "load_config", "plot", "run", "save_pipeline_results", diff --git a/neps/api.py b/neps/api.py index 35750a2c9..1caa955b9 100644 --- a/neps/api.py +++ b/neps/api.py @@ -9,6 +9,8 @@ from pathlib import Path from typing import TYPE_CHECKING, Any, Concatenate, Literal +import yaml + from neps.normalization import _normalize_imported_config from neps.optimizers import AskFunction, OptimizerChoice, load_optimizer from neps.runtime import _launch_runtime, _save_results @@ -674,4 +676,60 @@ def create_config( return NepsCompatConverter.to_neps_config(resolution_context), resolved_pipeline -__all__ = ["create_config", "import_trials", "run", "save_pipeline_results"] +def load_config( + config_path: Path | str, + pipeline_space: PipelineSpace, + config_id: str | None = None, +) -> PipelineSpace: + """Load a configuration from a neps config file. + + Args: + config_path: Path to the neps config file. + pipeline_space: The search space used to generate the configuration. + config_id: Optional config id to load, when only giving results folder. + + Returns: + The loaded pipeline space. 
+ """ + from neps.space.neps_spaces.neps_space import NepsCompatConverter + from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler + + str_path = str(config_path) + if not str_path.endswith(".yaml") and not str_path.endswith(".yml"): + if str_path.removesuffix("/").split("/")[-1].startswith("config_"): + str_path += "/config.yaml" + else: + if config_id is None: + raise ValueError( + "When providing a results folder, you must also provide a config_id." + ) + str_path = ( + str_path.removesuffix("/").removesuffix("configs") + + "/configs/" + + config_id + + "/config.yaml" + ) + + config_path = Path(str_path) + + with config_path.open("r") as f: + config_dict = yaml.load(f, Loader=yaml.SafeLoader) + + converted_dict = NepsCompatConverter.from_neps_config(config_dict) + + pipeline, _ = resolve( + pipeline_space, + domain_sampler=OnlyPredefinedValuesSampler(converted_dict.predefined_samplings), + environment_values=converted_dict.environment_values, + ) + + return pipeline + + +__all__ = [ + "create_config", + "import_trials", + "load_config", + "run", + "save_pipeline_results", +] diff --git a/neps/utils/__init__.py b/neps/utils/__init__.py index 7f2ee9e3f..35122741f 100644 --- a/neps/utils/__init__.py +++ b/neps/utils/__init__.py @@ -1,5 +1,11 @@ +from neps.space.neps_spaces.neps_space import ( + convert_operation_to_callable, + convert_operation_to_string, +) from neps.utils.trial_io import load_trials_from_pickle __all__ = [ + "convert_operation_to_callable", + "convert_operation_to_string", "load_trials_from_pickle", ] From 7885d2e108c228c871ba7a2233776f16a73a468d Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 4 Nov 2025 17:52:02 +0100 Subject: [PATCH 110/156] fix: simplify string representation in Operation class --- neps/space/neps_spaces/parameters.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 313a1eff7..eabebc134 100644 --- 
a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -1318,14 +1318,10 @@ def __str__(self) -> str: else: kwargs_str = "" - return ( - "Operation(operator =" - f" { - self._operator - if isinstance(self._operator, str) - else self._operator.__name__ - }{args_str}{kwargs_str})" + operator_name = ( + self._operator if isinstance(self._operator, str) else self._operator.__name__ ) + return f"Operation(operator ={operator_name}{args_str}{kwargs_str})" def compare_domain_to(self, other: object) -> bool: """Check if this operation parameter is equivalent to another. From c991ad9ff1e944753d30463960a1b6d6f4ca732f Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 4 Nov 2025 18:13:25 +0100 Subject: [PATCH 111/156] feat: add import_trials method to NePSRandomSearch and NePSRegularizedEvolution for importing external evaluations as trials --- neps/optimizers/neps_random_search.py | 64 ++++++++++++++++++- neps/optimizers/neps_regularized_evolution.py | 47 +++++++++++--- 2 files changed, 101 insertions(+), 10 deletions(-) diff --git a/neps/optimizers/neps_random_search.py b/neps/optimizers/neps_random_search.py index afc49c9e2..8030f261d 100644 --- a/neps/optimizers/neps_random_search.py +++ b/neps/optimizers/neps_random_search.py @@ -6,10 +6,11 @@ import heapq import random -from collections.abc import Mapping +from collections.abc import Mapping, Sequence from dataclasses import dataclass -from typing import TYPE_CHECKING, Literal +from typing import TYPE_CHECKING, Any, Literal +from neps.optimizers.optimizer import ImportedConfig from neps.space.neps_spaces.neps_space import _prepare_sampled_configs, resolve from neps.space.neps_spaces.parameters import Float, Integer from neps.space.neps_spaces.sampling import ( @@ -26,6 +27,7 @@ import neps.state.trial as trial_state from neps.optimizers import optimizer from neps.space.neps_spaces.parameters import PipelineSpace + from neps.state.pipeline_eval import UserResultDict from neps.state.trial 
import Trial @@ -135,6 +137,35 @@ def __call__( return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) + def import_trials( + self, + external_evaluations: Sequence[tuple[Mapping[str, Any], UserResultDict]], + trials: Mapping[str, Trial], + ) -> list[ImportedConfig]: + """Import external evaluations as trials. + + Args: + external_evaluations: A sequence of tuples containing configurations and + their evaluation results. + trials: A mapping of trial IDs to Trial objects, representing existing + trials. + + Returns: + A list of ImportedConfig objects representing the imported trials. + """ + n_trials = len(trials) + imported_configs = [] + for i, (config, result) in enumerate(external_evaluations): + config_id = str(n_trials + i + 1) + imported_configs.append( + ImportedConfig( + config=config, + id=config_id, + result=result, + ) + ) + return imported_configs + @dataclass class NePSComplexRandomSearch: @@ -389,3 +420,32 @@ def __call__( chosen_pipelines[0] = prior_pipeline return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) + + def import_trials( + self, + external_evaluations: Sequence[tuple[Mapping[str, Any], UserResultDict]], + trials: Mapping[str, Trial], + ) -> list[ImportedConfig]: + """Import external evaluations as trials. + + Args: + external_evaluations: A sequence of tuples containing configurations and + their evaluation results. + trials: A mapping of trial IDs to Trial objects, representing existing + trials. + + Returns: + A list of ImportedConfig objects representing the imported trials. 
+ """ + n_trials = len(trials) + imported_configs = [] + for i, (config, result) in enumerate(external_evaluations): + config_id = str(n_trials + i + 1) + imported_configs.append( + ImportedConfig( + config=config, + id=config_id, + result=result, + ) + ) + return imported_configs diff --git a/neps/optimizers/neps_regularized_evolution.py b/neps/optimizers/neps_regularized_evolution.py index e49d94ac3..b544ad26c 100644 --- a/neps/optimizers/neps_regularized_evolution.py +++ b/neps/optimizers/neps_regularized_evolution.py @@ -4,10 +4,11 @@ import heapq import random -from collections.abc import Mapping +from collections.abc import Mapping, Sequence from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Literal +from neps.optimizers.optimizer import ImportedConfig from neps.space.neps_spaces.neps_space import ( SamplingResolutionContext, _prepare_sampled_configs, @@ -27,6 +28,7 @@ import neps.state.optimizer as optimizer_state import neps.state.trial as trial_state from neps.optimizers import optimizer + from neps.state.pipeline_eval import UserResultDict from neps.state.trial import Trial @@ -97,20 +99,20 @@ def __init__( for fidelity_name, fidelity_obj in fidelity_attrs.items(): # If the user specifically asked for the highest fidelity, use that. if ignore_fidelity == "highest fidelity": - self._environment_values[fidelity_name] = fidelity_obj.max_value + self._environment_values[fidelity_name] = fidelity_obj.upper # If the user asked to ignore fidelities, sample a value randomly from the # domain. elif ignore_fidelity is True: # Sample randomly from the fidelity bounds. 
- if isinstance(fidelity_obj._domain, Integer): - assert isinstance(fidelity_obj.min_value, int) - assert isinstance(fidelity_obj.max_value, int) + if isinstance(fidelity_obj.domain, Integer): + assert isinstance(fidelity_obj.lower, int) + assert isinstance(fidelity_obj.upper, int) self._environment_values[fidelity_name] = random.randint( - fidelity_obj.min_value, fidelity_obj.max_value + fidelity_obj.lower, fidelity_obj.upper ) - elif isinstance(fidelity_obj._domain, Float): + elif isinstance(fidelity_obj.domain, Float): self._environment_values[fidelity_name] = random.uniform( - fidelity_obj.min_value, fidelity_obj.max_value + fidelity_obj.lower, fidelity_obj.upper ) # By default we don't support fidelities unless explicitly requested. else: @@ -372,3 +374,32 @@ def _obj_key(trial: Trial) -> float: raise ValueError(f"Invalid mutation type: {self._mutation_type}") return _prepare_sampled_configs(return_pipelines, n_prev_trials, n_requested == 1) + + def import_trials( + self, + external_evaluations: Sequence[tuple[Mapping[str, Any], UserResultDict]], + trials: Mapping[str, Trial], + ) -> list[ImportedConfig]: + """Import external evaluations as trials. + + Args: + external_evaluations: A sequence of tuples containing configurations and + their evaluation results. + trials: A mapping of trial IDs to Trial objects, representing existing + trials. + + Returns: + A list of ImportedConfig objects representing the imported trials. 
+ """ + n_trials = len(trials) + imported_configs = [] + for i, (config, result) in enumerate(external_evaluations): + config_id = str(n_trials + i + 1) + imported_configs.append( + ImportedConfig( + config=config, + id=config_id, + result=result, + ) + ) + return imported_configs From 6ef7b97dd348fc4939c1b996aa6bf7ce03945dda Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 4 Nov 2025 20:04:06 +0100 Subject: [PATCH 112/156] fix: update load_config return type and improve operation string representation --- neps/api.py | 19 ++++-- neps/space/neps_spaces/parameters.py | 12 ++-- neps/status/status.py | 68 ++++++++----------- .../basic_usage/pytorch_nn_example.py | 16 ----- 4 files changed, 51 insertions(+), 64 deletions(-) diff --git a/neps/api.py b/neps/api.py index 1caa955b9..562008ccb 100644 --- a/neps/api.py +++ b/neps/api.py @@ -19,9 +19,10 @@ check_neps_space_compatibility, convert_classic_to_neps_search_space, convert_neps_to_classic_search_space, + convert_operation_to_callable, resolve, ) -from neps.space.neps_spaces.parameters import PipelineSpace +from neps.space.neps_spaces.parameters import Operation, PipelineSpace from neps.space.parsing import convert_to_space from neps.state import NePSState, OptimizationState, SeedSnapshot from neps.status.status import post_run_csv @@ -680,7 +681,7 @@ def load_config( config_path: Path | str, pipeline_space: PipelineSpace, config_id: str | None = None, -) -> PipelineSpace: +) -> dict[str, Any]: """Load a configuration from a neps config file. Args: @@ -689,7 +690,7 @@ def load_config( config_id: Optional config id to load, when only giving results folder. Returns: - The loaded pipeline space. + The loaded configuration as a dictionary. 
""" from neps.space.neps_spaces.neps_space import NepsCompatConverter from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler @@ -723,7 +724,17 @@ def load_config( environment_values=converted_dict.environment_values, ) - return pipeline + pipeline_dict = dict(**pipeline.get_attrs()) + + for name, value in pipeline_dict.items(): + if isinstance(value, Operation): + # If the operator is a not a string, we convert it to a callable. + if isinstance(value.operator, str): + pipeline_dict[name] = str(value) + else: + pipeline_dict[name] = convert_operation_to_callable(value) + + return pipeline_dict __all__ = [ diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index eabebc134..411ec68c6 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -1299,16 +1299,16 @@ def __init__( def __str__(self) -> str: """Get a string representation of the operation.""" if self._args != (): - args_str = ", args = " + args_str = "args=" if isinstance(self._args, Resolvable): - args_str = str(self._args) + args_str += str(self._args) else: - args_str = "(" + ", ".join(str(arg) for arg in self._args) + ")" + args_str += "(" + ", ".join(str(arg) for arg in self._args) + ")" else: args_str = "" if self._kwargs != {}: - kwargs_str = ", kwargs = " + kwargs_str = "kwargs=" if isinstance(self._kwargs, Resolvable): kwargs_str += str(self._kwargs) else: @@ -1318,10 +1318,12 @@ def __str__(self) -> str: else: kwargs_str = "" + args_str += ", " if args_str and kwargs_str else "" + operator_name = ( self._operator if isinstance(self._operator, str) else self._operator.__name__ ) - return f"Operation(operator ={operator_name}{args_str}{kwargs_str})" + return f"{operator_name}({args_str}{kwargs_str})" def compare_domain_to(self, other: object) -> bool: """Check if this operation parameter is equivalent to another. 
diff --git a/neps/status/status.py b/neps/status/status.py index 8b4ee7812..d752bb156 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -184,26 +184,21 @@ def num_pending(self) -> int: return len(self.by_state[State.PENDING]) def formatted( # noqa: PLR0912, C901 - self, pipeline_space_variables: tuple[PipelineSpace, list[str]] | None = None + self, pipeline_space: PipelineSpace | None = None ) -> str: """Return a formatted string of the summary. Args: - pipeline_space_variables: If provided, this tuple contains the Pipeline and a - list of variable names to format the config in the summary. This is useful - for pipelines that have a complex configuration structure, allowing for a - more readable output. + pipeline_space: The PipelineSpace used for the run. If provided, this is used + to format the best config in a more readable way. !!! Warning: - This is only supported when using NePS-only optimizers, such as - `neps.algorithms.neps_random_search`, - `neps.algorithms.complex_random_search` - or `neps.algorithms.neps_priorband`. When the search space is - simple enough, using `neps.algorithms.random_search` or - `neps.algorithms.priorband` is not enough, as it will be transformed - to a simpler HPO framework, which is incompatible with the - `pipeline_space_variables` argument. + This is only supported when using NePS-only optimizers. When the + search space is simple enough, using `neps.algorithms.random_search` + or `neps.algorithms.priorband` is not enough, as it will be + transformed to a simpler HPO framework, which is incompatible with + the `pipeline_space` argument. Returns: A formatted string of the summary. 
@@ -227,22 +222,25 @@ def formatted( # noqa: PLR0912, C901 best_summary = ( f"# Best Found (config {best_trial.metadata.id}):" "\n" - f"\n objective_to_minimize: {best_objective_to_minimize}" + f"\n objective_to_minimize: {best_objective_to_minimize}\n config: " ) - if pipeline_space_variables is None: - best_summary += f"\n config: {best_trial.config}" + if not pipeline_space: + best_summary += f"{best_trial.config}" else: best_config_resolve = NepsCompatConverter().from_neps_config( best_trial.config ) pipeline_configs = [] - for variable in pipeline_space_variables[1]: + variables = list(pipeline_space.get_attrs().keys()) + list( + pipeline_space.fidelity_attrs.keys() + ) + for variable in variables: pipeline_configs.append( neps_space.config_string.ConfigString( neps_space.convert_operation_to_string( getattr( neps_space.resolve( - pipeline_space_variables[0], + pipeline_space, OnlyPredefinedValuesSampler( best_config_resolve.predefined_samplings ), @@ -277,10 +275,7 @@ def formatted( # noqa: PLR0912, C901 formatted_config = "\n".join(indented_lines) else: formatted_config = pipeline_config # type: ignore - best_summary += ( - f"\n config: {pipeline_space_variables[1][n_pipeline]}\n " - f" {formatted_config}" - ) + best_summary += f"\n\t{variables[n_pipeline]}: {formatted_config}" best_summary += f"\n path: {best_trial.metadata.location}" @@ -337,28 +332,23 @@ def status( root_directory: str | Path, *, print_summary: bool = False, - pipeline_space_variables: tuple[PipelineSpace, list[str]] | None = None, + pipeline_space: PipelineSpace | None = None, ) -> tuple[pd.DataFrame, pd.Series]: """Print status information of a neps run and return results. Args: root_directory: The root directory given to neps.run. print_summary: If true, print a summary of the current run state. - pipeline_space_variables: If provided, this tuple contains the Pipeline and a - list of variable names to format the config in the summary. 
This is useful - for pipelines that have a complex configuration structure, allowing for a - more readable output. - - !!! Warning: - - This is only supported when using NePS-only optimizers, such as - `neps.algorithms.neps_random_search`, - `neps.algorithms.complex_random_search` - or `neps.algorithms.neps_priorband`. When the search space is - simple enough, using `neps.algorithms.random_search` or - `neps.algorithms.priorband` is not enough, as it will be transformed to a - simpler HPO framework, which is incompatible with the - `pipeline_space_variables` argument. + pipeline_space: The PipelineSpace used for the run. If provided, this is used to + format the best config in a more readable way. + + !!! Warning: + + This is only supported when using NePS-only optimizers. When the + search space is simple enough, using `neps.algorithms.random_search` + or `neps.algorithms.priorband` is not enough, as it will be + transformed to a simpler HPO framework, which is incompatible with + the `pipeline_space` argument. Returns: Dataframe of full results and short summary series. 
@@ -367,7 +357,7 @@ def status( summary = Summary.from_directory(root_directory) if print_summary: - print(summary.formatted(pipeline_space_variables=pipeline_space_variables)) + print(summary.formatted(pipeline_space=pipeline_space)) df = summary.df() diff --git a/neps_examples/basic_usage/pytorch_nn_example.py b/neps_examples/basic_usage/pytorch_nn_example.py index 4bf4022a7..d7a0a87b4 100644 --- a/neps_examples/basic_usage/pytorch_nn_example.py +++ b/neps_examples/basic_usage/pytorch_nn_example.py @@ -119,22 +119,6 @@ class NN_Space(PipelineSpace): ) -# Sampling and printing one random configuration of the pipeline -pipeline = NN_Space() -resolved_pipeline, resolution_context = neps_space.resolve(pipeline) - -s = resolved_pipeline.model -s_config_string = neps_space.convert_operation_to_string(s) -pretty_config = neps_space.config_string.ConfigString(s_config_string).pretty_format() -s_callable = neps_space.convert_operation_to_callable(s) - -print("Callable:\n") -print(s_callable) - -print("\n\nConfig string:\n") -print(pretty_config) - - # Defining the pipeline, using the model from the NN_space space as callable def evaluate_pipeline(model: nn.Sequential): x = torch.ones(size=[1, 3, 220, 220]) From 83dbd61014a93b4c513df09ede8ab24b59669281 Mon Sep 17 00:00:00 2001 From: Anton Merlin Geburek <43831195+Meganton@users.noreply.github.com> Date: Tue, 4 Nov 2025 20:59:40 +0100 Subject: [PATCH 113/156] fix: change status parameters --- neps_examples/basic_usage/pytorch_nn_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neps_examples/basic_usage/pytorch_nn_example.py b/neps_examples/basic_usage/pytorch_nn_example.py index d7a0a87b4..710987b42 100644 --- a/neps_examples/basic_usage/pytorch_nn_example.py +++ b/neps_examples/basic_usage/pytorch_nn_example.py @@ -139,5 +139,5 @@ def evaluate_pipeline(model: nn.Sequential): neps.status( "results/neps_spaces_nn_example", print_summary=True, - pipeline_space_variables=(pipeline_space, ["model"]), 
+ pipeline_space=pipeline_space, ) From e9cff1ccc42648779152b0fc3ce7f021554bfcba Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 5 Nov 2025 11:54:02 +0100 Subject: [PATCH 114/156] fix: prevent resampling of Fidelity objects in Resampled class and add Operation to config_creation example --- neps/space/neps_spaces/parameters.py | 2 ++ neps_examples/convenience/config_creation.py | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 411ec68c6..88a23dd41 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -1439,6 +1439,8 @@ def __init__(self, source: Resolvable | str): Args: source: The source of the resampling, can be a resolvable object or a string. """ + if isinstance(source, Fidelity): + raise ValueError("Fidelity objects cannot be resampled.") self._source = source def __str__(self) -> str: diff --git a/neps_examples/convenience/config_creation.py b/neps_examples/convenience/config_creation.py index b663d873b..c2140fde0 100644 --- a/neps_examples/convenience/config_creation.py +++ b/neps_examples/convenience/config_creation.py @@ -11,7 +11,17 @@ class ExampleSpace(neps.PipelineSpace): float1 = neps.Float(0.0, 1.0) cat1 = neps.Categorical(["a", "b", "c"]) cat2 = neps.Categorical(["x", "y", float1]) - cat4 = neps.Categorical([neps.Resampled(cat2), neps.Resampled(cat1)]) + operation1 = neps.Categorical( + choices=[ + "option1", + "option2", + neps.Operation( + operator="option3", + args=(float1, neps.Resampled(cat1)), + kwargs={"param1": neps.Resampled(float1)}, + ) + ] + ) if __name__ == "__main__": From 7275fad769b1cefb2d723219080f16ecf5bfe51e Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 6 Nov 2025 10:11:26 +0100 Subject: [PATCH 115/156] fix: enhance configuration handling by updating return types and adding prior checks in PipelineSpace --- docs/reference/neps_spaces.md | 9 ++-- neps/api.py | 15 +++++- 
neps/optimizers/algorithms.py | 50 ++++---------------- neps/space/neps_spaces/parameters.py | 11 +++++ neps_examples/convenience/config_creation.py | 8 ++-- tests/test_state/test_neps_state.py | 10 +--- 6 files changed, 43 insertions(+), 60 deletions(-) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 4eaeafe75..91ba38f0d 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -214,15 +214,16 @@ def evaluate_pipeline(cnn: torch.nn.Module): ## Inspecting Configurations -NePS saves the configurations as paths, where each sampling decision is recorded. As they are hard to read, so you can load the configuration using `neps.load_config()`, which returns the resolved pipeline. You can print it, or convert specific operations to strings or callables using the utility functions `neps.convert_operation_to_string()` and `neps.convert_operation_to_callable()`: +NePS saves the configurations as paths, where each sampling decision is recorded. As they are hard to read, so you can load the configuration using `neps.load_config()`, which returns a dictionary with the resolved parameters and their values: ```python import neps -pipeline = neps.load_config("Path/to/config.yaml", pipeline_space=SimpleSpace()) +pipeline = neps.load_config("Path/to/config.yaml", pipeline_space=SimpleSpace()) # or +pipeline = neps.load_config("Path/to/neps_folder", config_id="config_0", pipeline_space=SimpleSpace()) -# The resolved_pipeline now contains all the parameters and their values, e.g. the Callable model -model_callable = neps.convert_operation_to_callable(resolved_pipeline.model) +# The pipeline now contains all the parameters and their values the same way they would be given to the evaluate_pipeline, e.g. 
the callable model: +model = pipeline["model"] ``` ## Using ConfigSpace diff --git a/neps/api.py b/neps/api.py index 562008ccb..149e6814e 100644 --- a/neps/api.py +++ b/neps/api.py @@ -659,7 +659,7 @@ def import_trials( def create_config( pipeline_space: PipelineSpace, -) -> tuple[Mapping[str, Any], PipelineSpace]: +) -> tuple[Mapping[str, Any], dict[str, Any]]: """Create a configuration by prompting the user for input. Args: @@ -674,7 +674,18 @@ def create_config( resolved_pipeline, resolution_context = resolve( pipeline_space, domain_sampler=IOSampler() ) - return NepsCompatConverter.to_neps_config(resolution_context), resolved_pipeline + + pipeline_dict = dict(**resolved_pipeline.get_attrs()) + + for name, value in pipeline_dict.items(): + if isinstance(value, Operation): + # If the operator is a not a string, we convert it to a callable. + if isinstance(value.operator, str): + pipeline_dict[name] = str(value) + else: + pipeline_dict[name] = convert_operation_to_callable(value) + + return NepsCompatConverter.to_neps_config(resolution_context), pipeline_dict def load_config( diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 1e404539b..b45ff67c6 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -51,11 +51,7 @@ convert_neps_to_classic_search_space, ) from neps.space.neps_spaces.parameters import ( - Categorical, - Float, - Integer, PipelineSpace, - Resolvable, ) from neps.space.neps_spaces.sampling import ( DomainSampler, @@ -434,10 +430,13 @@ def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 ) -def determine_optimizer_automatically(space: SearchSpace | PipelineSpace) -> str: +def determine_optimizer_automatically(space: SearchSpace | PipelineSpace) -> str: # noqa: PLR0911 if isinstance(space, PipelineSpace): - if space.fidelity_attrs: + has_prior = space.has_priors() + if space.fidelity_attrs and has_prior: return "neps_priorband" + if space.fidelity_attrs and not has_prior: + return 
"neps_hyperband" return "complex_random_search" has_prior = any( parameter.prior is not None for parameter in space.searchables.values() @@ -620,18 +619,7 @@ def neps_grid_search( "This optimizer only supports NePS spaces, please use a classic" " search space-compatible optimizer." ) - parameters = pipeline_space.get_attrs().values() - non_fid_parameters = [ - parameter - for parameter in parameters - if parameter not in pipeline_space.fidelity_attrs.values() - ] - if any( - parameter.has_prior # type: ignore - for parameter in non_fid_parameters - if isinstance(parameter, Resolvable) - and isinstance(parameter, Integer | Float | Categorical) - ): + if pipeline_space.has_priors(): logger.warning("Grid search does not support priors, they will be ignored.") if not pipeline_space.fidelity_attrs and ignore_fidelity: logger.warning( @@ -1625,18 +1613,7 @@ def neps_random_search( "You are using ignore_fidelity, but no fidelity is defined in the" " search space. Consider setting ignore_fidelity to False." ) - parameters = pipeline_space.get_attrs().values() - non_fid_parameters = [ - parameter - for parameter in parameters - if parameter not in pipeline_space.fidelity_attrs.values() - ] - if use_priors and not any( - parameter.has_prior # type: ignore - for parameter in non_fid_parameters - if isinstance(parameter, Resolvable) - and isinstance(parameter, Integer | Float | Categorical) - ): + if use_priors and not pipeline_space.has_priors(): logger.warning( "You have set use_priors=True, but no priors are defined in the search space." ) @@ -1785,18 +1762,7 @@ def neps_priorband( Returns: An instance of _BracketOptimizer configured for PriorBand sampling. 
""" - parameters = pipeline_space.get_attrs().values() - non_fid_parameters = [ - parameter - for parameter in parameters - if parameter not in pipeline_space.fidelity_attrs.values() - and isinstance(parameter, Integer | Float | Categorical) - ] - if not any( - parameter.has_prior # type: ignore - for parameter in non_fid_parameters - if isinstance(parameter, Resolvable) - ): + if not pipeline_space.has_priors(): logger.warning( "Warning: No priors are defined in the search space, priorband will sample" " uniformly. Consider using hyperband instead." diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 88a23dd41..579aa7b76 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -222,6 +222,17 @@ def get_attrs(self) -> Mapping[str, Any]: return attrs + def has_priors(self) -> bool: + """Check if any parameter in the pipeline has priors defined. + + Returns: + True if any parameter has priors, False otherwise. + """ + for param in self.get_attrs().values(): + if hasattr(param, "has_prior") and param.has_prior: + return True + return False + def from_attrs(self, attrs: Mapping[str, Any]) -> PipelineSpace: """Create a new Pipeline instance from the given attributes. diff --git a/neps_examples/convenience/config_creation.py b/neps_examples/convenience/config_creation.py index c2140fde0..97d69deef 100644 --- a/neps_examples/convenience/config_creation.py +++ b/neps_examples/convenience/config_creation.py @@ -19,20 +19,20 @@ class ExampleSpace(neps.PipelineSpace): operator="option3", args=(float1, neps.Resampled(cat1)), kwargs={"param1": neps.Resampled(float1)}, - ) + ), ] ) if __name__ == "__main__": # We create a configuration interactively and receive both - # the configuration dictionary and a the corresponding pipeline. + # the configuration dictionary and a dictionary of the sampled parameters. 
config, pipeline = neps.create_config(ExampleSpace()) print("Created configuration:") pprint(config) print("Sampled pipeline:") - print(pipeline, "\n") - # We can access the sampled values via e.g. pipeline.int1 + pprint(pipeline) + logging.basicConfig(level=logging.INFO) # The created configuration can then be used as an imported trial in NePS optimizers. diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index cb59800a3..c0c95375a 100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -152,10 +152,7 @@ def optimizer_and_key_and_search_space( if key in JUST_SKIP: pytest.xfail(f"{key} is not instantiable") - if key in NO_DEFAULT_PRIOR_SUPPORT and any( - parameter.has_prior if hasattr(parameter, "has_prior") else False - for parameter in search_space.get_attrs().values() - ): + if key in NO_DEFAULT_PRIOR_SUPPORT and search_space.has_priors(): pytest.xfail(f"{key} crashed with a prior") if search_space.fidelity_attrs and key in NO_DEFAULT_FIDELITY_SUPPORT: @@ -164,10 +161,7 @@ def optimizer_and_key_and_search_space( if key in REQUIRES_FIDELITY and not search_space.fidelity_attrs: pytest.xfail(f"{key} requires a fidelity parameter") - if key in REQUIRES_PRIOR and not any( - parameter.has_prior if hasattr(parameter, "has_prior") else False - for parameter in search_space.get_attrs().values() - ): + if key in REQUIRES_PRIOR and not search_space.has_priors(): pytest.xfail(f"{key} requires a prior") if key in REQUIRES_FIDELITY_MO and not search_space.fidelity_attrs: From 282a13edc10c9fc1fee94c6109fb8e69f61056c5 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 21 Nov 2025 00:34:44 +0100 Subject: [PATCH 116/156] feat: Enhance PipelineSpace functionality and persistence - Updated PipelineSpace class to dynamically create new classes when adding or removing parameters, ensuring compatibility with pickling. 
- Modified NePSState to support saving and loading of PipelineSpace and SearchSpace, including validation checks for consistency. - Implemented error handling for mismatched pipeline spaces during state loading and trial imports. - Added tests for search space persistence, validation, and integration with NePSState. - Updated status function to handle cases where pipeline space is not provided, ensuring backward compatibility. - Removed obsolete normalization test script. --- docs/reference/neps_run.md | 44 +- docs/reference/neps_spaces.md | 21 + docs/reference/optimizers.md | 25 + neps/__init__.py | 12 +- neps/api.py | 330 +++++++++++-- neps/runtime.py | 7 + neps/space/neps_spaces/parameters.py | 86 +++- neps/state/neps_state.py | 76 ++- neps/status/status.py | 51 ++- .../basic_usage/pytorch_nn_example.py | 1 - test_all_pickle.py | 36 ++ test_normalization_fix.py | 38 -- .../test_search_space_persistence.py | 433 ++++++++++++++++++ .../test_search_space_validation.py | 252 ++++++++++ 14 files changed, 1289 insertions(+), 123 deletions(-) create mode 100644 test_all_pickle.py delete mode 100644 test_normalization_fix.py create mode 100644 tests/test_state/test_search_space_persistence.py create mode 100644 tests/test_state/test_search_space_validation.py diff --git a/docs/reference/neps_run.md b/docs/reference/neps_run.md index ecf0d748b..73d293e1f 100644 --- a/docs/reference/neps_run.md +++ b/docs/reference/neps_run.md @@ -99,13 +99,55 @@ def run(learning_rate: float, epochs: int) -> float: return {"objective_to_minimize": loss, "cost": duration} neps.run( - # Increase the total number of trials from 10 as set previously to 50 + # Increase the total number of trials from 10 as set previously to e.g. 50 evaluations_to_spend=50, ) ``` If the run previously stopped due to reaching a budget and you specify the same budget, the worker will immediatly stop as it will remember the amount of budget it used previously. +!!! 
note "Auto-loading" + + When continuing a run, NePS automatically loads the search space and optimizer configuration from disk. You don't need to specify `pipeline_space=` or `searcher=` again - NePS will use the saved settings from the original run. + +## Reconstructing and Reproducing Runs + +Sometimes you want to inspect what settings were used in a previous run, or reproduce a run with the same or modified settings. NePS provides utility functions to load both the search space and optimizer information: + +```python +import neps + +# Load everything from a previous run +root_dir = "path/to/previous_run" + +pipeline_space = neps.load_pipeline_space(root_dir) +optimizer_info = neps.load_optimizer_info(root_dir) + +print(f"Original optimizer: {optimizer_info['name']}") +print(f"Original search space: {pipeline_space}") + +# Option 1: Continue the original run (auto-loads everything) +neps.run( + evaluate_pipeline=my_function, + root_directory=root_dir, + evaluations_to_spend=100, # Increase budget +) + +# Option 2: Start a new run with the same settings +neps.run( + evaluate_pipeline=my_function, + pipeline_space=pipeline_space, + root_directory="path/to/new_run", + searcher=optimizer_info['name'], + evaluations_to_spend=50, +) +``` + +For details on: + +- [`neps.load_pipeline_space()`][neps.api.load_pipeline_space] - see [Search Space Reference](neps_spaces.md#loading-the-search-space-from-disk) +- [`neps.load_optimizer_info()`][neps.api.load_optimizer_info] - see [Optimizer Reference](optimizers.md#loading-optimizer-information) + ## Overwriting a Run To overwrite a run, simply provide the same `root_directory=` to [`neps.run()`][neps.api.run] as before, with the `overwrite_root_directory=True` argument. 
diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index 91ba38f0d..ae7189a33 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -226,6 +226,27 @@ pipeline = neps.load_config("Path/to/neps_folder", config_id="config_0", pipelin model = pipeline["model"] ``` +### Loading the Search Space from Disk + +NePS automatically saves the search space when you run an optimization. You can retrieve it later using `neps.load_pipeline_space()`: + +```python +import neps + +# Load the search space from a previous run +pipeline_space = neps.load_pipeline_space("Path/to/neps_folder") + +# Now you can use it to inspect configurations, continue runs, or analysis +``` + +!!! note "Auto-loading" + + In most cases, you don't need to call `load_pipeline_space()` explicitly. When continuing a run, `neps.run()` automatically loads the search space from disk. See [Continuing Runs](neps_run.md#continuing-runs) for more details. + +!!! tip "Reconstructing a Run" + + You can load both the search space and optimizer information to fully reconstruct a previous run. See [Reconstructing and Reproducing Runs](neps_run.md#reconstructing-and-reproducing-runs) for a complete example. + ## Using ConfigSpace For users familiar with the [`ConfigSpace`](https://automl.github.io/ConfigSpace/main/) library, diff --git a/docs/reference/optimizers.md b/docs/reference/optimizers.md index a4159c42a..61fb48b78 100644 --- a/docs/reference/optimizers.md +++ b/docs/reference/optimizers.md @@ -110,6 +110,31 @@ neps.run( ) ``` +### 2.4 Loading Optimizer Information + +NePS automatically saves the optimizer metadata (name and configuration) when you run an optimization. 
You can retrieve this information later using `neps.load_optimizer_info()`: + +```python +import neps + +# Load the optimizer info from a previous run +optimizer_info = neps.load_optimizer_info("path/to/neps_folder") + +# Access the optimizer name and configuration +print(f"Optimizer: {optimizer_info['name']}") +print(f"Configuration: {optimizer_info['info']}") +``` + +This is useful for: + +- **Inspecting** what optimizer and settings were used in a previous run +- **Reproducing** experiments with the same optimizer configuration +- **Comparing** different optimizer settings across runs + +!!! tip "Reconstructing a Complete Run" + + Combine `load_optimizer_info()` with `load_pipeline_space()` to fully reconstruct a previous optimization. See [Reconstructing and Reproducing Runs](neps_run.md#reconstructing-and-reproducing-runs) for a complete example. + ## 3 Custom Optimizers To design entirely new optimizers, you can define them as class with a `__call__` method outside of NePS and pass them to the `neps.run()` function: diff --git a/neps/__init__.py b/neps/__init__.py index 441717a8f..583ca5b77 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -5,7 +5,15 @@ and algorithms. 
""" -from neps.api import create_config, import_trials, load_config, run, save_pipeline_results +from neps.api import ( + create_config, + import_trials, + load_config, + load_optimizer_info, + load_pipeline_space, + run, + save_pipeline_results, +) from neps.optimizers import algorithms from neps.optimizers.ask_and_tell import AskAndTell from neps.optimizers.optimizer import SampledConfig @@ -54,6 +62,8 @@ "import_trials", "load_and_merge_yamls", "load_config", + "load_optimizer_info", + "load_pipeline_space", "plot", "run", "save_pipeline_results", diff --git a/neps/api.py b/neps/api.py index 67064b357..9d1562830 100644 --- a/neps/api.py +++ b/neps/api.py @@ -12,7 +12,7 @@ import yaml from neps.normalization import _normalize_imported_config -from neps.optimizers import AskFunction, OptimizerChoice, load_optimizer +from neps.optimizers import AskFunction, OptimizerChoice, OptimizerInfo, load_optimizer from neps.runtime import _launch_runtime, _save_results from neps.space.neps_spaces.neps_space import ( adjust_evaluation_pipeline_for_neps_space, @@ -39,9 +39,9 @@ logger = logging.getLogger(__name__) -def run( # noqa: C901, D417, PLR0913 +def run( # noqa: C901, D417, PLR0912, PLR0913 evaluate_pipeline: Callable[..., EvaluatePipelineReturn] | str, - pipeline_space: ConfigurationSpace | PipelineSpace, + pipeline_space: ConfigurationSpace | PipelineSpace | None = None, *, root_directory: str | Path = "neps_results", overwrite_root_directory: bool = False, @@ -136,9 +136,18 @@ class MySpace(PipelineSpace): to specify the function to call. You may also directly provide an mode to import, e.g., `"my.module.something:evaluate_pipeline"`. - pipeline_space: The search space to minimize over. + pipeline_space: The pipeline space to minimize over. - This most direct way to specify the search space is as follows: + !!! tip "Optional for continuing runs" + + This parameter is **required** for the first run but **optional** when + continuing an existing optimization. 
If not provided, NePS will + automatically load the pipeline space from `root_directory/pipeline_space.pkl`. + + When provided for a continuing run, NePS will validate that it matches + the one saved on disk to prevent inconsistencies. + + This most direct way to specify the pipeline space is as follows: ```python MySpace(PipelineSpace): @@ -258,7 +267,7 @@ class MySpace(PipelineSpace): optimizer: Which optimizer to use. Not sure which to use? Leave this at `"auto"` and neps will - choose the optimizer based on the search space given. + choose the optimizer based on the pipeline space given. ??? note "Available optimizers" @@ -272,7 +281,7 @@ class MySpace(PipelineSpace): ```python neps.run( ..., - optimzier=("priorband", + optimizer=("priorband", { "sample_prior_first": True, } @@ -323,6 +332,27 @@ def __call__( "`evaluations_to_spend` for limiting the number of evaluations for this run.", ) + # Try to load pipeline_space from disk if not provided + if pipeline_space is None: + root_path = Path(root_directory) + if root_path.exists() and not overwrite_root_directory: + try: + pipeline_space = load_pipeline_space(root_path) + logger.info( + "Loaded pipeline space from disk. Continuing optimization with " + f"existing pipeline space from {root_path}" + ) + except (FileNotFoundError, ValueError) as e: + # If loading fails, we'll error below + logger.debug(f"Could not load pipeline space from disk: {e}") + + # If still None, raise error + if pipeline_space is None: + raise ValueError( + "pipeline_space is required for the first run. For continuing an" + " existing run, the pipeline space will be loaded from disk. 
No existing" + f" pipeline space found at: {root_directory}" + ) controling_params = { "evaluations_to_spend": evaluations_to_spend, "cost_to_spend": cost_to_spend, @@ -427,6 +457,7 @@ def __call__( overwrite_optimization_dir=overwrite_root_directory, sample_batch_size=sample_batch_size, worker_id=worker_id, + pipeline_space=pipeline_space, ) post_run_csv(root_directory) @@ -475,10 +506,10 @@ def save_pipeline_results( ) -def import_trials( - pipeline_space: SearchSpace | PipelineSpace, +def import_trials( # noqa: C901 evaluated_trials: Sequence[tuple[Mapping[str, Any], UserResultDict],], root_directory: Path | str, + pipeline_space: SearchSpace | PipelineSpace | None = None, overwrite_root_directory: bool = False, # noqa: FBT001, FBT002 optimizer: ( OptimizerChoice @@ -497,11 +528,14 @@ def import_trials( removes duplicates, and updates the optimization state accordingly. Args: - pipeline_space (SearchSpace): The search space used for the optimization. evaluated_trials (Sequence[tuple[Mapping[str, Any], UserResultDict]]): A sequence of tuples, each containing a configuration dictionary and its corresponding result. root_directory (Path or str): The root directory of the NePS run. + pipeline_space (SearchSpace | PipelineSpace | None): The pipeline space + used for the optimization. If None, will attempt to load from the + root_directory. If provided and a pipeline space exists on disk, they + will be validated to match. overwrite_root_directory (bool, optional): If True, overwrite the existing root directory. Defaults to False. optimizer: The optimizer to use for importing trials. @@ -512,7 +546,8 @@ def import_trials( None Raises: - ValueError: If any configuration or result is invalid. + ValueError: If any configuration or result is invalid, or if pipeline_space + cannot be determined (neither provided nor found on disk). FileNotFoundError: If the root directory does not exist. Exception: For unexpected errors during trial import. 
@@ -524,11 +559,34 @@ def import_trials( ... ({"param1": 0.5, "param2": 10}, ... UserResultDict(objective_to_minimize=-5.0)), ... ] - >>> neps.import_trials(pipeline_space, evaluated_trials, "my_results") + >>> neps.import_trials(evaluated_trials, "my_results", pipeline_space) """ if isinstance(root_directory, str): root_directory = Path(root_directory) + # Try to load pipeline_space from disk if not provided + if pipeline_space is None: + if root_directory.exists() and not overwrite_root_directory: + try: + pipeline_space = load_pipeline_space(root_directory) + logger.info( + "Loaded pipeline space from disk. Importing trials with " + f"existing pipeline space from {root_directory}" + ) + except (FileNotFoundError, ValueError) as e: + # If loading fails, we'll error below + logger.debug(f"Could not load pipeline space from disk: {e}") + + # If still None, raise error + if pipeline_space is None: + raise ValueError( + "pipeline_space is required when importing trials to a new run. " + "For importing to an existing run, the pipeline space will be loaded " + f"from disk. No existing pipeline space found at: {root_directory}" + ) + # Note: If pipeline_space is provided, it will be validated against the one on disk + # by NePSState.create_or_load() after necessary conversions are applied + neps_classic_space_compatibility = check_neps_space_compatibility(optimizer) if neps_classic_space_compatibility in ["both", "classic"] and isinstance( pipeline_space, PipelineSpace @@ -543,11 +601,11 @@ def import_trials( ): space = convert_classic_to_neps_search_space(space) - # Optimizer check, if the search space is a Pipeline and the optimizer is not a NEPS + # Optimizer check, if the pipeline space is a Pipeline and the optimizer is not a NEPS # algorithm, we raise an error, as the optimizer is not compatible. 
if isinstance(space, PipelineSpace) and neps_classic_space_compatibility == "classic": raise ValueError( - "The provided optimizer is not compatible with this complex search space. " + "The provided optimizer is not compatible with this complex pipeline space. " "Please use one that is, such as 'random_search', 'hyperband', " "'priorband', or 'complex_random_search'." ) @@ -567,6 +625,7 @@ def import_trials( optimizer_state=OptimizationState( budget=None, seed_snapshot=SeedSnapshot.new_capture(), shared_state={} ), + pipeline_space=space, ) normalized_trials = [] @@ -608,7 +667,7 @@ def create_config( """Create a configuration by prompting the user for input. Args: - pipeline_space: The pipeline search space to create a configuration for. + pipeline_space: The pipeline space to create a configuration for. Returns: A tuple containing the created configuration dictionary and the sampled pipeline. @@ -633,45 +692,145 @@ def create_config( return NepsCompatConverter.to_neps_config(resolution_context), pipeline_dict -def load_config( +def load_config( # noqa: C901, PLR0912, PLR0915 config_path: Path | str, - pipeline_space: PipelineSpace, + pipeline_space: PipelineSpace | SearchSpace | None = None, config_id: str | None = None, ) -> dict[str, Any]: """Load a configuration from a neps config file. Args: config_path: Path to the neps config file. - pipeline_space: The search space used to generate the configuration. + pipeline_space: The pipeline space used to generate the configuration. + If None, will attempt to load from the NePSState directory. config_id: Optional config id to load, when only giving results folder. Returns: The loaded configuration as a dictionary. + + Raises: + ValueError: If pipeline_space is not provided and cannot be loaded from disk. 
""" from neps.space.neps_spaces.neps_space import NepsCompatConverter from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler + # Try to load pipeline_space from NePSState if not provided + state = None # Track state for later use in config loading + + if pipeline_space is None: + try: + # Extract the root directory from config_path + str_path_temp = str(config_path) + if "/configs/" in str_path_temp or "\\configs\\" in str_path_temp: + root_dir = Path( + str_path_temp.split("/configs/")[0].split("\\configs\\")[0] + ) + else: + root_dir = Path(str_path_temp).parent.parent + + state = NePSState.create_or_load(path=root_dir, load_only=True) + pipeline_space = state.lock_and_get_search_space() + + if pipeline_space is None: + raise ValueError( + "Could not load pipeline_space from disk. " + "Please provide pipeline_space argument or ensure " + "the NePSState was created with search_space saved." + ) + except Exception as e: + raise ValueError( + f"pipeline_space not provided and could not be loaded from disk: {e}" + ) from e + else: + # User provided a pipeline_space - validate it matches the one on disk + from neps.exceptions import NePSError + + try: + str_path_temp = str(config_path) + if "/configs/" in str_path_temp or "\\configs\\" in str_path_temp: + root_dir = Path( + str_path_temp.split("/configs/")[0].split("\\configs\\")[0] + ) + else: + root_dir = Path(str_path_temp).parent.parent + + state = NePSState.create_or_load(path=root_dir, load_only=True) + disk_space = state.lock_and_get_search_space() + + if disk_space is not None: + # Validate that provided space matches disk space + import pickle + + if pickle.dumps(disk_space) != pickle.dumps(pipeline_space): + raise NePSError( + "The pipeline_space provided does not match the one saved on" + " disk.\\nPipeline space location:" + f" {root_dir / 'pipeline_space.pkl'}\\nPlease either:\\n 1." + " Don't provide pipeline_space (it will be loaded" + " automatically), or\\n 2. 
Provide the same pipeline_space that" + " was used in neps.run()" + ) + except NePSError: + raise + except Exception: # noqa: S110, BLE001 + # If we can't load/validate, just continue with provided space + pass + + # Determine config_id from path str_path = str(config_path) + trial_id = None + if not str_path.endswith(".yaml") and not str_path.endswith(".yml"): if str_path.removesuffix("/").split("/")[-1].startswith("config_"): - str_path += "/config.yaml" + # Extract trial_id from path like "configs/config_1" + # or "configs/config_1_rung_0" + trial_id = str_path.removesuffix("/").split("/")[-1] else: if config_id is None: raise ValueError( "When providing a results folder, you must also provide a config_id." ) - str_path = ( - str_path.removesuffix("/").removesuffix("configs") - + "/configs/" - + config_id - + "/config.yaml" - ) - - config_path = Path(str_path) - - with config_path.open("r") as f: - config_dict = yaml.load(f, Loader=yaml.SafeLoader) - + trial_id = config_id + else: + # Extract trial_id from yaml path like "configs/config_1/config.yaml" + path_parts = str_path.replace("\\", "/").split("/") + for i, part in enumerate(path_parts): + if part == "configs" and i + 1 < len(path_parts): + trial_id = path_parts[i + 1] + break + + # Use the locked method from NePSState to safely read the trial + if trial_id is not None and state is not None: + try: + trial = state.lock_and_get_trial_by_id(trial_id) + config_dict = dict(trial.config) # Convert Mapping to dict + except Exception: # noqa: BLE001 + # Fallback to direct file read if trial can't be loaded + str_path_fallback = str(config_path) + if not str_path_fallback.endswith(".yaml") and not str_path_fallback.endswith( + ".yml" + ): + str_path_fallback += "/config.yaml" + config_path = Path(str_path_fallback) + with config_path.open("r") as f: + config_dict = yaml.load(f, Loader=yaml.SafeLoader) + else: + # Fallback to direct file read + str_path_fallback = str(config_path) + if not 
str_path_fallback.endswith(".yaml") and not str_path_fallback.endswith( + ".yml" + ): + str_path_fallback += "/config.yaml" + config_path = Path(str_path_fallback) + with config_path.open("r") as f: + config_dict = yaml.load(f, Loader=yaml.SafeLoader) + + # Handle different pipeline space types + if not isinstance(pipeline_space, PipelineSpace): + # For SearchSpace (classic), just return the config dict + return dict(config_dict) if isinstance(config_dict, Mapping) else config_dict + + # For PipelineSpace, resolve it converted_dict = NepsCompatConverter.from_neps_config(config_dict) pipeline, _ = resolve( @@ -693,10 +852,119 @@ def load_config( return pipeline_dict +def load_pipeline_space( + root_directory: str | Path, +) -> PipelineSpace | SearchSpace: + """Load the pipeline space from a neps run directory. + + This is a convenience function that loads the pipeline space that was saved + during a neps.run() call. The pipeline space is automatically saved to disk + and can be loaded to inspect it or use it with other neps utilities. + + Args: + root_directory: Path to the neps results directory (the same path + that was passed to neps.run()). + + Returns: + The pipeline space that was used in the neps run. + + Raises: + FileNotFoundError: If no neps state is found at the given path. + ValueError: If no pipeline space was saved in the neps run. 
+ + Example: + ```python + # After running neps + neps.run( + evaluate_pipeline=my_function, + pipeline_space=MySpace(), + root_directory="results", + ) + + # Later, load the space + space = neps.load_pipeline_space("results") + ``` + """ + from neps.state import NePSState + + root_directory = Path(root_directory) + + try: + state = NePSState.create_or_load(path=root_directory, load_only=True) + pipeline_space = state.lock_and_get_search_space() + + if pipeline_space is None: + raise ValueError( + f"No pipeline space was saved in the neps run at: {root_directory}\n" + "This can happen if the run was created before pipeline space " + "persistence was added, or if the pipeline_space.pkl file was deleted." + ) + + return pipeline_space + except FileNotFoundError as e: + raise FileNotFoundError( + f"No neps state found at: {root_directory}\n" + "Please provide a valid neps results directory." + ) from e + + +def load_optimizer_info( + root_directory: str | Path, +) -> OptimizerInfo: + """Load the optimizer information from a neps run directory. + + This function loads the optimizer metadata that was saved during a neps.run() + call, including the optimizer name and its configuration parameters. This is + useful for inspecting what optimizer was used and with what settings. + + Args: + root_directory: Path to the neps results directory (the same path + that was passed to neps.run()). + + Returns: + A dictionary containing: + - 'name': The name of the optimizer (e.g., 'bayesian_optimization') + - 'info': Additional optimizer configuration (e.g., initialization kwargs) + + Raises: + FileNotFoundError: If no neps state is found at the given path. 
+ + Example: + ```python + # After running neps + neps.run( + evaluate_pipeline=my_function, + pipeline_space=MySpace(), + root_directory="results", + searcher="bayesian_optimization", + ) + + # Later, check what optimizer was used + optimizer_info = neps.load_optimizer_info("results") + print(f"Optimizer: {optimizer_info['name']}") + print(f"Config: {optimizer_info['info']}") + ``` + """ + from neps.state import NePSState + + root_directory = Path(root_directory) + + try: + state = NePSState.create_or_load(path=root_directory, load_only=True) + return state.lock_and_get_optimizer_info() + except FileNotFoundError as e: + raise FileNotFoundError( + f"No neps state found at: {root_directory}\n" + "Please provide a valid neps results directory." + ) from e + + __all__ = [ "create_config", "import_trials", "load_config", + "load_optimizer_info", + "load_pipeline_space", "run", "save_pipeline_results", ] diff --git a/neps/runtime.py b/neps/runtime.py index 027b417bc..4118e01ad 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -53,6 +53,7 @@ from neps.utils.common import gc_disabled if TYPE_CHECKING: + from neps import SearchSpace from neps.optimizers import OptimizerInfo from neps.optimizers.optimizer import AskFunction @@ -1106,6 +1107,7 @@ def _launch_runtime( # noqa: PLR0913 optimizer: AskFunction, optimizer_info: OptimizerInfo, optimization_dir: Path, + pipeline_space: SearchSpace | PipelineSpace, cost_to_spend: float | None, ignore_errors: bool = False, objective_value_on_error: float | None, @@ -1158,8 +1160,13 @@ def _launch_runtime( # noqa: PLR0913 shared_state=None, # TODO: Unused for the time being... 
worker_ids=None, ), + pipeline_space=pipeline_space, ) break + except NePSError: + # Don't retry on NePSError - these are user errors + # like pipeline space mismatch + raise except Exception: # noqa: BLE001 time.sleep(0.5) logger.debug( diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 579aa7b76..25e0c4517 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -300,14 +300,10 @@ def add( ) param_name = name if name else f"param_{len(self.get_attrs()) + 1}" - class NewSpace(PipelineSpace): - pass - - NewSpace.__name__ = self.__class__.__name__ - - new_pipeline = NewSpace() + # Create a new class dynamically with the added parameter + # Get all existing attributes plus the new one + new_attrs = {} for exist_name, value in self.get_attrs().items(): - setattr(new_pipeline, exist_name, value) if exist_name == param_name and not _parameters_are_equivalent( value, new_param ): @@ -317,9 +313,26 @@ class NewSpace(PipelineSpace): f" {value}\n" f" {new_param}" ) - if not hasattr(new_pipeline, param_name): - setattr(new_pipeline, param_name, new_param) - return new_pipeline + new_attrs[exist_name] = value + + # Add the new parameter if it doesn't exist + if param_name not in new_attrs: + new_attrs[param_name] = new_param + + # Create a new class with a unique name that can be pickled + new_class_name = f"{self.__class__.__name__}_added_{param_name}" + NewSpace = type(new_class_name, (self.__class__.__bases__[0],), new_attrs) + + # Set module and qualname to match original for proper pickling + NewSpace.__module__ = self.__class__.__module__ + + # Register the new class in the module's namespace so pickle can find it + import sys + + module = sys.modules[NewSpace.__module__] + setattr(module, new_class_name, NewSpace) + + return cast(PipelineSpace, NewSpace()) def remove(self, name: str) -> PipelineSpace: """Remove a parameter from the pipeline by its name. 
This is NOT an in-place @@ -339,16 +352,28 @@ def remove(self, name: str) -> PipelineSpace: f"No parameter with the name {name!r} exists in the pipeline." ) - class NewSpace(PipelineSpace): - pass + # Create a new class dynamically without the removed parameter + # Get all attributes except the one to remove + new_attrs = {} + for attr_name, attr_value in self.get_attrs().items(): + if attr_name != name: + new_attrs[attr_name] = attr_value - NewSpace.__name__ = self.__class__.__name__ - new_pipeline = NewSpace() - for exist_name, value in self.get_attrs().items(): - if exist_name != name: - setattr(new_pipeline, exist_name, value) + # Create a new class with a unique name that can be pickled + # We use the original class as the base to maintain the class hierarchy + new_class_name = f"{self.__class__.__name__}_removed_{name}" + NewSpace = type(new_class_name, (self.__class__.__bases__[0],), new_attrs) - return new_pipeline + # Set module and qualname to match original for proper pickling + NewSpace.__module__ = self.__class__.__module__ + + # Register the new class in the module's namespace so pickle can find it + import sys + + module = sys.modules[NewSpace.__module__] + setattr(module, new_class_name, NewSpace) + + return cast(PipelineSpace, NewSpace()) def add_prior( self, @@ -376,11 +401,8 @@ def add_prior( f"No parameter with the name {parameter_name!r} exists in the pipeline." 
) - class NewSpace(PipelineSpace): - pass - - NewSpace.__name__ = self.__class__.__name__ - new_pipeline = NewSpace() + # Create a new class dynamically with the modified parameter + new_attrs = {} for exist_name, value in self.get_attrs().items(): if exist_name == parameter_name: if isinstance(value, Integer | Float | Categorical): @@ -402,8 +424,22 @@ class NewSpace(PipelineSpace): ) else: new_value = value - setattr(new_pipeline, exist_name, new_value) - return new_pipeline + new_attrs[exist_name] = new_value + + # Create a new class with a unique name that can be pickled + new_class_name = f"{self.__class__.__name__}_prior_{parameter_name}" + NewSpace = type(new_class_name, (self.__class__.__bases__[0],), new_attrs) + + # Set module and qualname to match original for proper pickling + NewSpace.__module__ = self.__class__.__module__ + + # Register the new class in the module's namespace so pickle can find it + import sys + + module = sys.modules[NewSpace.__module__] + setattr(module, new_class_name, NewSpace) + + return cast(PipelineSpace, NewSpace()) class ConfidenceLevel(enum.Enum): diff --git a/neps/state/neps_state.py b/neps/state/neps_state.py index 928b5e07b..762791e22 100644 --- a/neps/state/neps_state.py +++ b/neps/state/neps_state.py @@ -43,6 +43,8 @@ if TYPE_CHECKING: from neps.optimizers import OptimizerInfo from neps.optimizers.optimizer import AskFunction + from neps.space import SearchSpace + from neps.space.neps_spaces.parameters import PipelineSpace from neps.utils.common import gc_disabled @@ -251,10 +253,14 @@ class NePSState: _optimizer_state_path: Path = field(repr=False) _optimizer_state: OptimizationState = field(repr=False) + _pipeline_space_path: Path = field(repr=False) + _err_lock: FileLocker = field(repr=False) _shared_errors_path: Path = field(repr=False) _shared_errors: ErrDump = field(repr=False) + _pipeline_space: SearchSpace | PipelineSpace | None = field(repr=False, default=None) + new_score: float = float("inf") """Tracking of 
the new incumbent""" @@ -543,6 +549,18 @@ def lock_and_get_optimizer_info(self) -> OptimizerInfo: with self._optimizer_lock.lock(): return _deserialize_optimizer_info(self._optimizer_info_path) + def lock_and_get_search_space(self) -> SearchSpace | PipelineSpace | None: + """Get the pipeline space, with the lock acquired. + + Returns: + The pipeline space if it was saved to disk, None otherwise. + """ + with self._optimizer_lock.lock(): + if not self._pipeline_space_path.exists(): + return None + with self._pipeline_space_path.open("rb") as f: + return pickle.load(f) # noqa: S301 + def lock_and_get_optimizer_state(self) -> OptimizationState: """Get the optimizer state.""" with self._optimizer_lock.lock(): # noqa: SIM117 @@ -627,13 +645,14 @@ def lock_and_get_current_evaluating_trials(self) -> list[Trial]: ] @classmethod - def create_or_load( + def create_or_load( # noqa: C901, PLR0912 cls, path: Path, *, load_only: bool = False, optimizer_info: OptimizerInfo | None = None, optimizer_state: OptimizationState | None = None, + pipeline_space: SearchSpace | PipelineSpace | None = None, ) -> NePSState: """Create a new NePSState in a directory or load the existing one if it already exists, depending on the argument. @@ -654,12 +673,16 @@ def create_or_load( load_only: If True, only load the state and do not create a new one. optimizer_info: The optimizer info to use. optimizer_state: The optimizer state to use. + pipeline_space: The pipeline space to save. Optional - if provided, it will be + saved to disk and validated on subsequent loads. Returns: The NePSState. Raises: - NePSError: If the optimizer info on disk does not match the one provided. + NePSError: If the optimizer info on disk does not match the one provided, + or if the pipeline space on disk does not match the one provided. + FileNotFoundError: If load_only=True and no NePSState exists at the path. 
""" path = path.absolute().resolve() is_new = not path.exists() @@ -676,6 +699,7 @@ def create_or_load( optimizer_info_path = path / "optimizer_info.yaml" optimizer_state_path = path / "optimizer_state.pkl" + pipeline_space_path = path / "pipeline_space.pkl" shared_errors_path = path / "shared_errors.jsonl" # We have to do one bit of sanity checking to ensure that the optimzier @@ -697,6 +721,47 @@ def create_or_load( with optimizer_state_path.open("rb") as f: optimizer_state = pickle.load(f) # noqa: S301 + # Load and validate pipeline space if it exists + if pipeline_space_path.exists(): + try: + with pipeline_space_path.open("rb") as f: + existing_space = pickle.load(f) # noqa: S301 + except (EOFError, pickle.UnpicklingError) as e: + # File exists but is empty or corrupted (race condition during write) + # Treat as if file doesn't exist yet + logger.debug( + f"Could not load pipeline_space.pkl (possibly being written): {e}" + ) + existing_space = None + else: + if ( + not load_only + and pipeline_space is not None + and pickle.dumps(existing_space) != pickle.dumps(pipeline_space) + ): + # Strictly validate that pipeline spaces match + # We use pickle dumps to compare since pipeline spaces may not + # implement __eq__ + raise NePSError( + "The pipeline space on disk does not match the one" + " provided.\nPipeline space is saved at:" + f" {pipeline_space_path}\nIf you want to start a new" + " run with a different pipeline space, use a" + " different root_directory or set" + " overwrite_root_directory=True." + ) + pipeline_space = existing_space + elif pipeline_space is None and not load_only: + # No pipeline space on disk and none provided for a new/continued run + # This is fine for backward compatibility (old runs) but log info + logger.info( + "No pipeline space provided and none found on disk. " + "This is fine for backward compatibility but consider providing one." 
+ ) + elif pipeline_space is None: + # load_only=True and no pipeline space on disk - fine for backward compat + pass + optimizer_info = existing_info error_dump = ReaderWriterErrDump.read(shared_errors_path) else: @@ -707,6 +772,11 @@ def create_or_load( with optimizer_state_path.open("wb") as f: pickle.dump(optimizer_state, f, protocol=pickle.HIGHEST_PROTOCOL) + # Save pipeline space if provided + if pipeline_space is not None: + with atomic_write(pipeline_space_path, "wb") as f: + pickle.dump(pipeline_space, f, protocol=pickle.HIGHEST_PROTOCOL) + error_dump = ErrDump([]) return NePSState( @@ -733,8 +803,10 @@ def create_or_load( _optimizer_info=optimizer_info, _optimizer_state_path=optimizer_state_path, _optimizer_state=optimizer_state, # type: ignore + _pipeline_space_path=pipeline_space_path, _shared_errors_path=shared_errors_path, _shared_errors=error_dump, + _pipeline_space=pipeline_space, ) diff --git a/neps/status/status.py b/neps/status/status.py index b29e4dd85..66d2ab3b0 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -1,8 +1,12 @@ -"""Functions to get the status of a run and save the status to CSV files.""" +"""Functions to get the status of a run and save the status to CSV files. + +This module provides utilities for monitoring NePS optimization runs. 
+""" # ruff: noqa: T201 from __future__ import annotations +import contextlib import itertools from collections.abc import Sequence from dataclasses import asdict, dataclass, field @@ -20,6 +24,9 @@ if TYPE_CHECKING: from neps.space.neps_spaces.parameters import PipelineSpace + from neps.space.search_space import SearchSpace +else: + from neps.space.neps_spaces.parameters import PipelineSpace def _build_trace_texts(best_configs: list[dict]) -> tuple[str, str]: @@ -186,21 +193,14 @@ def num_pending(self) -> int: return len(self.by_state[State.PENDING]) def formatted( # noqa: PLR0912, C901 - self, pipeline_space: PipelineSpace | None = None + self, pipeline_space: PipelineSpace | SearchSpace | None = None ) -> str: """Return a formatted string of the summary. Args: - pipeline_space: The PipelineSpace used for the run. If provided, this is used - to format the best config in a more readable way. - - !!! Warning: - - This is only supported when using NePS-only optimizers. When the - search space is simple enough, using `neps.algorithms.random_search` - or `neps.algorithms.priorband` is not enough, as it will be - transformed to a simpler HPO framework, which is incompatible with - the `pipeline_space` argument. + pipeline_space: Optional PipelineSpace for the run. If provided, it is used + to format the best config in a more readable way. This is typically + auto-loaded from disk by the status() function. Returns: A formatted string of the summary. 
@@ -228,7 +228,8 @@ def formatted( # noqa: PLR0912, C901 ) if not pipeline_space: best_summary += f"{best_trial.config}" - else: + elif isinstance(pipeline_space, PipelineSpace): + # Only PipelineSpace supports pretty formatting - SearchSpace doesn't best_config_resolve = NepsCompatConverter().from_neps_config( best_trial.config ) @@ -278,6 +279,9 @@ def formatted( # noqa: PLR0912, C901 else: formatted_config = pipeline_config # type: ignore best_summary += f"\n\t{variables[n_pipeline]}: {formatted_config}" + else: + # SearchSpace or other space type - just use string representation + best_summary += f"{best_trial.config}" best_summary += f"\n path: {best_trial.metadata.location}" @@ -334,28 +338,27 @@ def status( root_directory: str | Path, *, print_summary: bool = False, - pipeline_space: PipelineSpace | None = None, ) -> tuple[pd.DataFrame, pd.Series]: """Print status information of a neps run and return results. Args: root_directory: The root directory given to neps.run. print_summary: If true, print a summary of the current run state. - pipeline_space: The PipelineSpace used for the run. If provided, this is used to - format the best config in a more readable way. - - !!! Warning: - - This is only supported when using NePS-only optimizers. When the - search space is simple enough, using `neps.algorithms.random_search` - or `neps.algorithms.priorband` is not enough, as it will be - transformed to a simpler HPO framework, which is incompatible with - the `pipeline_space` argument. Returns: Dataframe of full results and short summary series. 
""" root_directory = Path(root_directory) + + # Try to load pipeline_space from disk for pretty printing + pipeline_space = None + if print_summary: + from neps.api import load_pipeline_space + + with contextlib.suppress(FileNotFoundError, ValueError): + pipeline_space = load_pipeline_space(root_directory) + # Note: pipeline_space can still be None if it wasn't saved, which is fine + summary = Summary.from_directory(root_directory) if print_summary: diff --git a/neps_examples/basic_usage/pytorch_nn_example.py b/neps_examples/basic_usage/pytorch_nn_example.py index 710987b42..574994529 100644 --- a/neps_examples/basic_usage/pytorch_nn_example.py +++ b/neps_examples/basic_usage/pytorch_nn_example.py @@ -139,5 +139,4 @@ def evaluate_pipeline(model: nn.Sequential): neps.status( "results/neps_spaces_nn_example", print_summary=True, - pipeline_space=pipeline_space, ) diff --git a/test_all_pickle.py b/test_all_pickle.py new file mode 100644 index 000000000..96522c2c5 --- /dev/null +++ b/test_all_pickle.py @@ -0,0 +1,36 @@ +import pickle +import neps +from neps.space.neps_spaces.parameters import PipelineSpace + + +class SimpleSpace(PipelineSpace): + int_param1 = neps.Integer(1, 100) + int_param2 = neps.Integer(1, 100) + + +print("Testing remove()...") +space_remove = SimpleSpace().remove("int_param2") +print(f" After remove: {list(space_remove.get_attrs().keys())}") +pickled = pickle.dumps(space_remove) +unpickled = pickle.loads(pickled) +print(f" ✅ Pickle/unpickle successful: {list(unpickled.get_attrs().keys())}") + +print("\nTesting add()...") +space_add = SimpleSpace().add(neps.Float(0, 1), "new_float") +print(f" After add: {list(space_add.get_attrs().keys())}") +pickled = pickle.dumps(space_add) +unpickled = pickle.loads(pickled) +print(f" ✅ Pickle/unpickle successful: {list(unpickled.get_attrs().keys())}") + +print("\nTesting add_prior()...") +space_prior = SimpleSpace().add_prior("int_param1", 50, "medium") +print(f" After add_prior: 
{list(space_prior.get_attrs().keys())}") +print(f" int_param1 has prior: {space_prior.get_attrs()['int_param1'].has_prior}") +pickled = pickle.dumps(space_prior) +unpickled = pickle.loads(pickled) +print(f" ✅ Pickle/unpickle successful: {list(unpickled.get_attrs().keys())}") +print( + f" Unpickled int_param1 has prior: {unpickled.get_attrs()['int_param1'].has_prior}" +) + +print("\nAll tests passed! ✅") diff --git a/test_normalization_fix.py b/test_normalization_fix.py deleted file mode 100644 index 6bb0241ae..000000000 --- a/test_normalization_fix.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Test script for normalization with PipelineSpace.""" - -import neps -from neps.normalization import _normalize_imported_config - - -class TestSpace(neps.PipelineSpace): - x = neps.Float(0, 1) - y = neps.Integer(0, 10) - epochs = neps.Fidelity(neps.Integer(1, 10)) - - -space = TestSpace() - -# Config with correct SAMPLING__ and ENVIRONMENT__ keys, plus an extra invalid key -config = { - "SAMPLING__Resolvable.x::float__0_1_False": 0.5, - "SAMPLING__Resolvable.y::integer__0_10_False": 5, - "ENVIRONMENT__epochs": 3, - "extra_key": 999, # This should be removed -} - -print("Input config keys:", sorted(config.keys())) - -normalized = _normalize_imported_config(space, config) - -print("Normalized config keys:", sorted(normalized.keys())) -print("Extra key removed:", "extra_key" not in normalized) -print("\nAll expected keys present:") -print( - " - SAMPLING__Resolvable.x::float__0_1_False:", - "SAMPLING__Resolvable.x::float__0_1_False" in normalized, -) -print( - " - SAMPLING__Resolvable.y::integer__0_10_False:", - "SAMPLING__Resolvable.y::integer__0_10_False" in normalized, -) -print(" - ENVIRONMENT__epochs:", "ENVIRONMENT__epochs" in normalized) diff --git a/tests/test_state/test_search_space_persistence.py b/tests/test_state/test_search_space_persistence.py new file mode 100644 index 000000000..87ac110b2 --- /dev/null +++ b/tests/test_state/test_search_space_persistence.py @@ -0,0 +1,433 
@@ +"""Tests for search space persistence in NePSState. + +This file focuses on low-level NePSState functionality: +- Saving and loading search spaces (PipelineSpace and SearchSpace) +- Backward compatibility (runs without search space) +- Testing utility functions like load_pipeline_space and load_optimizer_info + +For higher-level integration tests and validation logic, see +test_search_space_validation.py. +""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + +from neps.exceptions import NePSError +from neps.optimizers import OptimizerInfo +from neps.space import HPOCategorical, HPOFloat, HPOInteger, SearchSpace +from neps.space.neps_spaces.parameters import Categorical, Float, Integer, PipelineSpace +from neps.state import BudgetInfo, NePSState, OptimizationState, SeedSnapshot + + +class SimpleSpace(PipelineSpace): + """Simple test space with various parameter types.""" + + a = Float(0, 1) + b = Categorical(("x", "y", "z")) + c = Integer(0, 10) + + +class TestSpace1(PipelineSpace): + """First test space for validation.""" + + x = Float(0, 10) + y = Integer(1, 10) + + +class TestSpace2(PipelineSpace): + """Second test space (different from TestSpace1).""" + + x = Float(0, 10) + y = Integer(1, 20) # Different range + + +def test_search_space_saved_and_loaded_pipeline_space(tmp_path: Path) -> None: + """Test that PipelineSpace is saved and can be loaded back.""" + root_dir = tmp_path / "test_run" + pipeline_space = SimpleSpace() + + # Create state with search space + NePSState.create_or_load( + path=root_dir, + optimizer_info=OptimizerInfo(name="test", info={}), + optimizer_state=OptimizationState( + budget=BudgetInfo(cost_to_spend=10, used_cost_budget=0), + seed_snapshot=SeedSnapshot.new_capture(), + shared_state={}, + ), + pipeline_space=pipeline_space, + ) + + # Verify pipeline_space.pkl exists + assert (root_dir / "pipeline_space.pkl").exists() + + # Load state and verify search space + state2 = 
NePSState.create_or_load(path=root_dir, load_only=True) + loaded_space = state2.lock_and_get_search_space() + + assert loaded_space is not None + assert isinstance(loaded_space, PipelineSpace) + + # Verify the structure matches + assert "a" in loaded_space.get_attrs() + assert "b" in loaded_space.get_attrs() + assert "c" in loaded_space.get_attrs() + + +def test_search_space_saved_and_loaded_search_space(tmp_path: Path) -> None: + """Test that old-style SearchSpace is saved and can be loaded back.""" + root_dir = tmp_path / "test_run" + search_space = SearchSpace( + { + "a": HPOFloat(0, 1), + "b": HPOCategorical(["x", "y", "z"]), + "c": HPOInteger(0, 10), + } + ) + + # Create state with search space + NePSState.create_or_load( + path=root_dir, + optimizer_info=OptimizerInfo(name="test", info={}), + optimizer_state=OptimizationState( + budget=BudgetInfo(cost_to_spend=10, used_cost_budget=0), + seed_snapshot=SeedSnapshot.new_capture(), + shared_state={}, + ), + pipeline_space=search_space, + ) + + # Verify pipeline_space.pkl exists + assert (root_dir / "pipeline_space.pkl").exists() + + # Load state and verify search space + state2 = NePSState.create_or_load(path=root_dir, load_only=True) + loaded_space = state2.lock_and_get_search_space() + + assert loaded_space is not None + assert isinstance(loaded_space, SearchSpace) + assert "a" in loaded_space + assert "b" in loaded_space + assert "c" in loaded_space + + +def test_search_space_not_provided_backward_compatible(tmp_path: Path) -> None: + """Test that NePSState works without search space (backward compatibility).""" + root_dir = tmp_path / "test_run" + + # Create state WITHOUT search space + NePSState.create_or_load( + path=root_dir, + optimizer_info=OptimizerInfo(name="test", info={}), + optimizer_state=OptimizationState( + budget=BudgetInfo(cost_to_spend=10, used_cost_budget=0), + seed_snapshot=SeedSnapshot.new_capture(), + shared_state={}, + ), + ) + + # Verify pipeline_space.pkl does NOT exist + assert not 
(root_dir / "pipeline_space.pkl").exists() + + # Load state and verify search space is None + state2 = NePSState.create_or_load(path=root_dir, load_only=True) + loaded_space = state2.lock_and_get_search_space() + + assert loaded_space is None + + +def test_load_pipeline_space_function_pipeline_space(tmp_path: Path) -> None: + """Test the load_pipeline_space utility function with PipelineSpace.""" + from neps import load_pipeline_space + + root_dir = tmp_path / "test_run" + pipeline_space = SimpleSpace() + + # Create state with search space + NePSState.create_or_load( + path=root_dir, + optimizer_info=OptimizerInfo(name="test", info={}), + optimizer_state=OptimizationState( + budget=BudgetInfo(cost_to_spend=10, used_cost_budget=0), + seed_snapshot=SeedSnapshot.new_capture(), + shared_state={}, + ), + pipeline_space=pipeline_space, + ) + + # Load using the utility function + loaded_space = load_pipeline_space(root_dir) + + assert loaded_space is not None + assert isinstance(loaded_space, PipelineSpace) + assert "a" in loaded_space.get_attrs() + assert "b" in loaded_space.get_attrs() + assert "c" in loaded_space.get_attrs() + + +def test_load_pipeline_space_function_search_space(tmp_path: Path) -> None: + """Test the load_pipeline_space utility function with SearchSpace.""" + from neps import load_pipeline_space + + root_dir = tmp_path / "test_run" + search_space = SearchSpace( + { + "x": HPOFloat(0, 1), + "y": HPOInteger(1, 10), + } + ) + + # Create state with search space + NePSState.create_or_load( + path=root_dir, + optimizer_info=OptimizerInfo(name="test", info={}), + optimizer_state=OptimizationState( + budget=BudgetInfo(cost_to_spend=10, used_cost_budget=0), + seed_snapshot=SeedSnapshot.new_capture(), + shared_state={}, + ), + pipeline_space=search_space, + ) + + # Load using the utility function + loaded_space = load_pipeline_space(root_dir) + + assert loaded_space is not None + assert isinstance(loaded_space, SearchSpace) + assert "x" in loaded_space + assert 
"y" in loaded_space + + +def test_load_pipeline_space_function_not_found(tmp_path: Path) -> None: + """Test that load_pipeline_space raises FileNotFoundError for non-existent + directory. + """ + from neps import load_pipeline_space + + root_dir = tmp_path / "nonexistent" + + with pytest.raises(FileNotFoundError, match="No neps state found"): + load_pipeline_space(root_dir) + + +def test_load_pipeline_space_function_no_space_saved(tmp_path: Path) -> None: + """Test that load_pipeline_space raises ValueError when no search space was saved.""" + from neps import load_pipeline_space + + root_dir = tmp_path / "test_run" + + # Create state WITHOUT search space + NePSState.create_or_load( + path=root_dir, + optimizer_info=OptimizerInfo(name="test", info={}), + optimizer_state=OptimizationState( + budget=BudgetInfo(cost_to_spend=10, used_cost_budget=0), + seed_snapshot=SeedSnapshot.new_capture(), + shared_state={}, + ), + ) + + # Try to load - should raise ValueError + with pytest.raises(ValueError, match="No pipeline space was saved"): + load_pipeline_space(root_dir) + + +def test_load_optimizer_info_function(tmp_path: Path) -> None: + """Test the load_optimizer_info utility function.""" + from neps import load_optimizer_info + + root_dir = tmp_path / "test_run" + + # Create state with optimizer info + optimizer_info = OptimizerInfo( + name="bayesian_optimization", + info={"acquisition": "EI", "initial_design_size": 10}, + ) + NePSState.create_or_load( + path=root_dir, + optimizer_info=optimizer_info, + optimizer_state=OptimizationState( + budget=BudgetInfo(cost_to_spend=10, used_cost_budget=0), + seed_snapshot=SeedSnapshot.new_capture(), + shared_state={}, + ), + ) + + # Load using the utility function + loaded_info = load_optimizer_info(root_dir) + + assert loaded_info["name"] == "bayesian_optimization" + assert loaded_info["info"]["acquisition"] == "EI" + assert loaded_info["info"]["initial_design_size"] == 10 + + +def 
test_load_optimizer_info_function_not_found(tmp_path: Path) -> None: + """Test that load_optimizer_info raises FileNotFoundError for non-existent + directory. + """ + from neps import load_optimizer_info + + root_dir = tmp_path / "nonexistent" + + with pytest.raises(FileNotFoundError, match="No neps state found"): + load_optimizer_info(root_dir) + + +def test_import_trials_saves_search_space(tmp_path: Path) -> None: + """Test that import_trials saves the search space to disk.""" + from neps import import_trials, load_pipeline_space + from neps.state.pipeline_eval import UserResultDict + + root_dir = tmp_path / "test_import" + + # Import trials with a search space + evaluated_trials = [ + ({"x": 1.0, "y": 5}, UserResultDict(objective_to_minimize=1.0)), + ({"x": 2.0, "y": 8}, UserResultDict(objective_to_minimize=2.0)), + ] + + import_trials( + evaluated_trials=evaluated_trials, + root_directory=root_dir, + pipeline_space=TestSpace1(), + ) + + # Verify the search space was saved + loaded_space = load_pipeline_space(root_dir) + assert loaded_space is not None + # import_trials may convert PipelineSpace to SearchSpace, so check for SearchSpace + assert isinstance(loaded_space, TestSpace1 | SearchSpace) + # Verify it has the correct parameters by checking the keys + if isinstance(loaded_space, SearchSpace): + assert "x" in loaded_space + assert "y" in loaded_space + + +def test_import_trials_validates_search_space(tmp_path: Path) -> None: + """Test that import_trials validates the search space against what's on disk.""" + from neps import import_trials + from neps.state.pipeline_eval import UserResultDict + + root_dir = tmp_path / "test_import_validate" + + # First import with one space + evaluated_trials = [ + ({"x": 1.0, "y": 5}, UserResultDict(objective_to_minimize=1.0)), + ] + + import_trials( + evaluated_trials=evaluated_trials, + root_directory=root_dir, + pipeline_space=TestSpace1(), + ) + + # Try to import again with a different space - should raise error + with 
pytest.raises(NePSError, match="pipeline space on disk does not match"): + import_trials( + evaluated_trials=evaluated_trials, + root_directory=root_dir, + pipeline_space=TestSpace2(), + ) + + +def test_import_trials_without_space_loads_from_disk(tmp_path: Path) -> None: + """Test that import_trials can load pipeline space from disk when not provided.""" + from neps import import_trials, load_pipeline_space + from neps.state.pipeline_eval import UserResultDict + + root_dir = tmp_path / "test_import_auto_load" + + # First import with explicit space + evaluated_trials_1 = [ + ({"x": 1.0, "y": 5}, UserResultDict(objective_to_minimize=1.0)), + ] + + import_trials( + evaluated_trials=evaluated_trials_1, + root_directory=root_dir, + pipeline_space=TestSpace1(), + ) + + # Second import without providing space - should load from disk + evaluated_trials_2 = [ + ({"x": 2.0, "y": 8}, UserResultDict(objective_to_minimize=2.0)), + ] + + import_trials( + evaluated_trials=evaluated_trials_2, + root_directory=root_dir, + # pipeline_space not provided - should load from disk + ) + + # Verify both trials were imported successfully + loaded_space = load_pipeline_space(root_dir) + assert loaded_space is not None + + +def test_import_trials_without_space_fails_on_new_directory(tmp_path: Path) -> None: + """Test that import_trials raises error when space is not provided and directory + is new. 
+ """ + from neps import import_trials + from neps.state.pipeline_eval import UserResultDict + + root_dir = tmp_path / "test_import_no_space_error" + + evaluated_trials = [ + ({"x": 1.0, "y": 5}, UserResultDict(objective_to_minimize=1.0)), + ] + + # Should raise error when no space provided and directory doesn't exist + with pytest.raises( + ValueError, match="pipeline_space is required when importing trials" + ): + import_trials( + evaluated_trials=evaluated_trials, + root_directory=root_dir, + # pipeline_space not provided + ) + + +def test_import_trials_validates_provided_space_against_disk(tmp_path: Path) -> None: + """Test that when both space is provided and exists on disk, they are validated.""" + from neps import import_trials + from neps.state.pipeline_eval import UserResultDict + + root_dir = tmp_path / "test_import_validation" + + # First import with TestSpace1 + evaluated_trials_1 = [ + ({"x": 1.0, "y": 5}, UserResultDict(objective_to_minimize=1.0)), + ] + + import_trials( + evaluated_trials=evaluated_trials_1, + root_directory=root_dir, + pipeline_space=TestSpace1(), + ) + + # Second import explicitly providing the same space - should work + evaluated_trials_2 = [ + ({"x": 2.0, "y": 8}, UserResultDict(objective_to_minimize=2.0)), + ] + + import_trials( + evaluated_trials=evaluated_trials_2, + root_directory=root_dir, + pipeline_space=TestSpace1(), + ) + + # Third import with different space - should fail + with pytest.raises(NePSError, match="pipeline space on disk does not match"): + import_trials( + evaluated_trials=evaluated_trials_2, + root_directory=root_dir, + pipeline_space=TestSpace2(), + ) + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/test_state/test_search_space_validation.py b/tests/test_state/test_search_space_validation.py new file mode 100644 index 000000000..fba23c73f --- /dev/null +++ b/tests/test_state/test_search_space_validation.py @@ -0,0 +1,252 @@ +"""Tests for search space validation and error 
handling. + +This file focuses on high-level integration tests through neps.run(): +- Strict validation (errors on mismatched search spaces) +- Auto-loading from disk +- Error handling when search space is missing +- Integration with load_config, status, and DDP runtime + +For low-level persistence tests, see test_search_space_persistence.py. +""" + +from __future__ import annotations + +import logging +from pathlib import Path + +import pytest + +import neps +from neps.exceptions import NePSError +from neps.space import SearchSpace +from neps.space.neps_spaces.parameters import Float, Integer, PipelineSpace +from neps.state import NePSState + + +class TestSpace1(PipelineSpace): + """First test pipeline space.""" + + x = Float(0.0, 1.0) + y = Integer(1, 10) + + +class TestSpace2(PipelineSpace): + """Different test pipeline space.""" + + a = Float(0.0, 2.0) + b = Integer(5, 20) + + +def eval_fn1(**config): + """Evaluation function for TestSpace1.""" + return config["x"] + config["y"] + + +def eval_fn2(**config): + """Evaluation function for TestSpace2.""" + return config["a"] + config["b"] + + +def test_error_on_mismatched_search_space(tmp_path: Path): + """Test that providing a different search space raises an error (strict + validation). 
+ """ + root_dir = tmp_path / "test_error" + + # Create initial state with TestSpace1 + neps.run( + evaluate_pipeline=eval_fn1, + pipeline_space=TestSpace1(), + root_directory=str(root_dir), + evaluations_to_spend=1, + ) + + # Try to continue with TestSpace2 - should raise NePSError + with pytest.raises(NePSError, match="pipeline space on disk does not match"): + neps.run( + evaluate_pipeline=eval_fn2, + pipeline_space=TestSpace2(), + root_directory=str(root_dir), + evaluations_to_spend=2, + ) + + +def test_success_without_search_space_when_on_disk(tmp_path: Path): + """Test that not providing search space works when one exists on disk.""" + root_dir = tmp_path / "test_no_space" + + # Create initial state with TestSpace1 + neps.run( + evaluate_pipeline=eval_fn1, + pipeline_space=TestSpace1(), + root_directory=str(root_dir), + evaluations_to_spend=1, + ) + + # Continue WITHOUT providing pipeline_space - should load from disk + neps.run( + evaluate_pipeline=eval_fn1, + # pipeline_space not provided! + root_directory=str(root_dir), + evaluations_to_spend=3, # Total evaluations wanted + ) + + # Verify we have at least 2 evaluations (continuation worked) + df, _summary = neps.status(str(root_dir), print_summary=False) + assert len(df) >= 2, f"Should have at least 2 evaluations, got {len(df)}" + + +def test_error_when_no_space_provided_and_none_on_disk(tmp_path: Path): + """Test that not providing search space errors when none exists on disk.""" + root_dir = tmp_path / "test_no_space_error" + + # Try to run WITHOUT providing pipeline_space and with no existing run + with pytest.raises(ValueError, match="pipeline_space is required"): + neps.run( + evaluate_pipeline=eval_fn1, + # pipeline_space not provided and root_dir doesn't exist! 
+ root_directory=str(root_dir), + evaluations_to_spend=1, + ) + + +def test_load_only_does_not_validate(tmp_path: Path, caplog): + """Test that load_only=True does not validate search space.""" + root_dir = tmp_path / "test_load_only" + + # Create initial state + neps.run( + evaluate_pipeline=eval_fn1, + pipeline_space=TestSpace1(), + root_directory=str(root_dir), + evaluations_to_spend=1, + ) + + # Load with load_only - should not error or warn about validation + with caplog.at_level(logging.WARNING): + state = NePSState.create_or_load( + path=root_dir, + load_only=True, + ) + loaded_space = state.lock_and_get_search_space() + + # Should not have validation errors/warnings since load_only=True + assert not any( + "pipeline space on disk" in record.message.lower() for record in caplog.records + ) + + # Should have loaded the original space + assert loaded_space is not None + + +def test_load_config_with_wrong_space_raises_error(tmp_path: Path): + """Test that load_config with wrong pipeline_space raises an error.""" + root_dir = tmp_path / "test_load_config_error" + + # Create run with TestSpace1 + neps.run( + evaluate_pipeline=eval_fn1, + pipeline_space=TestSpace1(), + root_directory=str(root_dir), + evaluations_to_spend=1, + ) + + # Find a config file + config_dir = root_dir / "configs" + configs = [ + d for d in config_dir.iterdir() if d.is_dir() and not d.name.startswith(".") + ] + assert len(configs) > 0, "Should have at least one config" + + config_path = configs[0] / "config.yaml" + + # Try to load with wrong pipeline_space - should raise error + with pytest.raises(NePSError, match="pipeline_space provided does not match"): + neps.load_config(config_path, pipeline_space=TestSpace2()) + + +def test_load_config_without_space_auto_loads(tmp_path: Path): + """Test that load_config without pipeline_space auto-loads from disk.""" + root_dir = tmp_path / "test_load_config_auto" + + # Create run + neps.run( + evaluate_pipeline=eval_fn1, + 
pipeline_space=TestSpace1(), + root_directory=str(root_dir), + evaluations_to_spend=1, + ) + + # Find a config file + config_dir = root_dir / "configs" + configs = [ + d for d in config_dir.iterdir() if d.is_dir() and not d.name.startswith(".") + ] + assert len(configs) > 0, "Should have at least one config" + + config_path = configs[0] / "config.yaml" + + # Load config without providing space - should auto-load from disk + config = neps.load_config(config_path) + + assert "x" in config, "Should have x parameter" + + +def test_ddp_runtime_loads_search_space(tmp_path: Path): + """Test that DDP runtime path also loads search space correctly.""" + root_dir = tmp_path / "test_ddp" + + # Create initial state with search space + neps.run( + evaluate_pipeline=eval_fn1, + pipeline_space=TestSpace1(), + root_directory=str(root_dir), + evaluations_to_spend=1, + ) + + # Simulate DDP path - just load_only (DDP doesn't create state) + state = NePSState.create_or_load(path=root_dir, load_only=True) + loaded_space = state.lock_and_get_search_space() + + assert loaded_space is not None, "DDP should be able to load search space" + # NePS converts PipelineSpace to SearchSpace internally + assert isinstance(loaded_space, PipelineSpace | SearchSpace), "Should load correctly" + + +def test_status_without_space_works(tmp_path: Path): + """Test that status works without explicit pipeline_space.""" + root_dir = tmp_path / "test_status_auto" + + # Create a run + neps.run( + evaluate_pipeline=eval_fn1, + pipeline_space=TestSpace1(), + root_directory=str(root_dir), + evaluations_to_spend=1, + ) + + # Status without pipeline_space - should work + df, _summary = neps.status(str(root_dir), print_summary=False) + assert len(df) > 0, "Should have results" + + +def test_status_handles_missing_search_space_gracefully(tmp_path: Path): + """Test that status doesn't crash if search space can't be loaded.""" + root_dir = tmp_path / "test_status_missing" + + # Create a run + neps.run( + 
evaluate_pipeline=eval_fn1, + pipeline_space=TestSpace1(), + root_directory=str(root_dir), + evaluations_to_spend=1, + ) + + # Delete the search space file + search_space_file = root_dir / "pipeline_space.pkl" + if search_space_file.exists(): + search_space_file.unlink() + + # Status with print_summary=False should work even without search space + df, _summary = neps.status(str(root_dir), print_summary=False) + assert len(df) > 0, "Should still get results" From 2b5c914816fea95353dd19814a324171a091fdcd Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 21 Nov 2025 01:06:13 +0100 Subject: [PATCH 117/156] fix: update import_trials to use pipeline_space parameter and improve logging --- neps_examples/convenience/import_trials.py | 5 ++- test_all_pickle.py | 36 ------------------- .../test_pipeline_space_methods.py | 4 +-- 3 files changed, 4 insertions(+), 41 deletions(-) delete mode 100644 test_all_pickle.py diff --git a/neps_examples/convenience/import_trials.py b/neps_examples/convenience/import_trials.py index 25296e48d..f5a335fe2 100644 --- a/neps_examples/convenience/import_trials.py +++ b/neps_examples/convenience/import_trials.py @@ -140,7 +140,6 @@ class ExampleSpace(neps.PipelineSpace): integer1 = neps.Integer(lower=0, upper=1) integer2 = neps.Integer(lower=1, upper=1000, log=True) - logging.info( f"{'-'*80} Running initial evaluations for optimizer {optimizer}. 
{'-'*80}" ) @@ -166,9 +165,9 @@ class ExampleSpace(neps.PipelineSpace): # import trials been evaluated above neps.import_trials( - ExampleSpace(), evaluated_trials=trials, root_directory=f"results/trial_import/results_{optimizer}", + pipeline_space=ExampleSpace(), overwrite_root_directory=True, optimizer=optimizer, ) @@ -180,9 +179,9 @@ class ExampleSpace(neps.PipelineSpace): # import some trials evaluated in some other setup neps.import_trials( - ExampleSpace(), evaluated_trials=get_evaluated_trials(optimizer), root_directory=f"results/trial_import/results_{optimizer}", + pipeline_space=ExampleSpace(), optimizer=optimizer, ) diff --git a/test_all_pickle.py b/test_all_pickle.py deleted file mode 100644 index 96522c2c5..000000000 --- a/test_all_pickle.py +++ /dev/null @@ -1,36 +0,0 @@ -import pickle -import neps -from neps.space.neps_spaces.parameters import PipelineSpace - - -class SimpleSpace(PipelineSpace): - int_param1 = neps.Integer(1, 100) - int_param2 = neps.Integer(1, 100) - - -print("Testing remove()...") -space_remove = SimpleSpace().remove("int_param2") -print(f" After remove: {list(space_remove.get_attrs().keys())}") -pickled = pickle.dumps(space_remove) -unpickled = pickle.loads(pickled) -print(f" ✅ Pickle/unpickle successful: {list(unpickled.get_attrs().keys())}") - -print("\nTesting add()...") -space_add = SimpleSpace().add(neps.Float(0, 1), "new_float") -print(f" After add: {list(space_add.get_attrs().keys())}") -pickled = pickle.dumps(space_add) -unpickled = pickle.loads(pickled) -print(f" ✅ Pickle/unpickle successful: {list(unpickled.get_attrs().keys())}") - -print("\nTesting add_prior()...") -space_prior = SimpleSpace().add_prior("int_param1", 50, "medium") -print(f" After add_prior: {list(space_prior.get_attrs().keys())}") -print(f" int_param1 has prior: {space_prior.get_attrs()['int_param1'].has_prior}") -pickled = pickle.dumps(space_prior) -unpickled = pickle.loads(pickled) -print(f" ✅ Pickle/unpickle successful: 
{list(unpickled.get_attrs().keys())}") -print( - f" Unpickled int_param1 has prior: {unpickled.get_attrs()['int_param1'].has_prior}" -) - -print("\nAll tests passed! ✅") diff --git a/tests/test_neps_space/test_pipeline_space_methods.py b/tests/test_neps_space/test_pipeline_space_methods.py index c91b1859a..a720f3e01 100644 --- a/tests/test_neps_space/test_pipeline_space_methods.py +++ b/tests/test_neps_space/test_pipeline_space_methods.py @@ -390,5 +390,5 @@ def test_space_string_representation(): # Should be able to get string representation without error str_repr = str(modified_space) assert "BasicSpace" in str_repr - assert "added_param" in str_repr - assert "y" not in str_repr # Should be removed + assert "added_param = " in str_repr + assert "y = " not in str_repr # Should be removed From d3e7d19f3bbf5053e5feea7fe02893ad3df22159 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 21 Nov 2025 13:51:09 +0100 Subject: [PATCH 118/156] feat: Improve trial import functionality and fix PipelineSpace serialization --- neps/optimizers/neps_random_search.py | 65 +++++++++++++++++++++++++-- neps/space/neps_spaces/parameters.py | 45 +++++++++++++++++++ neps/state/neps_state.py | 16 ++++--- neps/state/trial.py | 30 +++++++++---- 4 files changed, 138 insertions(+), 18 deletions(-) diff --git a/neps/optimizers/neps_random_search.py b/neps/optimizers/neps_random_search.py index afc49c9e2..008bdfc41 100644 --- a/neps/optimizers/neps_random_search.py +++ b/neps/optimizers/neps_random_search.py @@ -6,10 +6,11 @@ import heapq import random -from collections.abc import Mapping +from collections.abc import Mapping, Sequence from dataclasses import dataclass -from typing import TYPE_CHECKING, Literal +from typing import TYPE_CHECKING, Any, Literal +from neps.optimizers import optimizer from neps.space.neps_spaces.neps_space import _prepare_sampled_configs, resolve from neps.space.neps_spaces.parameters import Float, Integer from neps.space.neps_spaces.sampling import ( @@ -24,8 +25,8 
@@ if TYPE_CHECKING: import neps.state.optimizer as optimizer_state import neps.state.trial as trial_state - from neps.optimizers import optimizer from neps.space.neps_spaces.parameters import PipelineSpace + from neps.state.pipeline_eval import UserResultDict from neps.state.trial import Trial @@ -135,6 +136,35 @@ def __call__( return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) + def import_trials( + self, + external_evaluations: Sequence[tuple[Mapping[str, Any], UserResultDict]], + trials: Mapping[str, Trial], + ) -> list[optimizer.ImportedConfig]: + """Import external evaluations as trials. + + Args: + external_evaluations: A sequence of tuples containing configuration + dictionaries and their corresponding results. + trials: A mapping of trial IDs to Trial objects, representing previous + trials. + + Returns: + A list of ImportedConfig objects representing the imported trials. + """ + n_trials = len(trials) + imported_configs = [] + for i, (config, result) in enumerate(external_evaluations): + config_id = str(n_trials + i + 1) + imported_configs.append( + optimizer.ImportedConfig( + config=config, + id=config_id, + result=result, + ) + ) + return imported_configs + @dataclass class NePSComplexRandomSearch: @@ -389,3 +419,32 @@ def __call__( chosen_pipelines[0] = prior_pipeline return _prepare_sampled_configs(chosen_pipelines, n_prev_trials, return_single) + + def import_trials( + self, + external_evaluations: Sequence[tuple[Mapping[str, Any], UserResultDict]], + trials: Mapping[str, Trial], + ) -> list[optimizer.ImportedConfig]: + """Import external evaluations as trials. + + Args: + external_evaluations: A sequence of tuples containing configuration + dictionaries and their corresponding results. + trials: A mapping of trial IDs to Trial objects, representing previous + trials. + + Returns: + A list of ImportedConfig objects representing the imported trials. 
+ """ + n_trials = len(trials) + imported_configs = [] + for i, (config, result) in enumerate(external_evaluations): + config_id = str(n_trials + i + 1) + imported_configs.append( + optimizer.ImportedConfig( + config=config, + id=config_id, + result=result, + ) + ) + return imported_configs diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 25e0c4517..a1f94e07b 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -28,10 +28,41 @@ class _Unset: def __repr__(self) -> str: return "" + def __reduce__(self) -> tuple: + """Custom pickle support to maintain singleton pattern across contexts.""" + return (_get_unset_singleton, ()) + _UNSET = _Unset() +def _get_unset_singleton() -> _Unset: + """Return the global _UNSET singleton. + + This function is used by _Unset.__reduce__ to ensure the singleton + pattern is maintained when pickling and unpickling. + """ + return _UNSET + + +def _reconstruct_pipeline_space(attrs: Mapping[str, Any]) -> PipelineSpace: + """Reconstruct a PipelineSpace from its attributes. + + This function is used by __reduce__ to enable pickling of PipelineSpace + instances across different module contexts. + + Args: + attrs: A mapping of attribute names to their values. + + Returns: + A new PipelineSpace instance with the specified attributes. + """ + space = PipelineSpace() + for name, value in attrs.items(): + setattr(space, name, value) + return space + + def _parameters_are_equivalent(param1: Any, param2: Any) -> bool: """Check if two parameters are equivalent using their is_equivalent_to method. @@ -186,6 +217,20 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: # noqa: ARG002 class PipelineSpace(Resolvable): """A class representing a pipeline in NePS spaces.""" + def __reduce__(self) -> tuple: + """Custom pickle support to make PipelineSpace serializable across contexts. 
+ + This method enables PipelineSpace instances (including custom subclasses) + to be pickled and unpickled even when the original class definition is not + available (e.g., when defined in __main__ or a notebook). + + Returns: + A tuple (callable, args) for reconstructing the object. + """ + # Store the attributes instead of the class definition + attrs = dict(self.get_attrs()) + return (_reconstruct_pipeline_space, (attrs,)) + @property def fidelity_attrs(self) -> Mapping[str, Fidelity]: """Get the fidelity attributes of the pipeline. Fidelity attributes are special diff --git a/neps/state/neps_state.py b/neps/state/neps_state.py index 762791e22..2b2c3bf36 100644 --- a/neps/state/neps_state.py +++ b/neps/state/neps_state.py @@ -341,12 +341,12 @@ def lock_and_sample_trial( return trials def lock_and_import_trials( - self, data: list, *, worker_id: str + self, imported_configs: list, *, worker_id: str ) -> Trial | list[Trial]: """Acquire the state lock and import trials from external data. Args: - data: List of trial dictionaries to import. + imported_configs: List of trial dictionaries to import. worker_id: The worker ID performing the import. Returns: @@ -357,11 +357,15 @@ def lock_and_import_trials( NePSError: If storing or reporting trials fails. """ with self._optimizer_lock.lock(), gc_disabled(): - trials = Trial.load_from_dict(data=data, worker_id=worker_id) + imported_configs = Trial.load_from_dict( + data=imported_configs, + worker_id=worker_id, + trial_directory=self._trial_repo.directory, + ) with self._trial_lock.lock(): - self._trial_repo.store_new_trial(trials) - for trial in trials: + self._trial_repo.store_new_trial(imported_configs) + for trial in imported_configs: assert trial.report is not None self._report_trial_evaluation( trial=trial, @@ -373,7 +377,7 @@ def lock_and_import_trials( f"Imported trial {trial.id} with result: " f"{trial.report.objective_to_minimize}." 
) - return trials + return imported_configs def lock_and_report_trial_evaluation( self, diff --git a/neps/state/trial.py b/neps/state/trial.py index 8b40e8076..34b5d9059 100644 --- a/neps/state/trial.py +++ b/neps/state/trial.py @@ -6,6 +6,7 @@ from collections.abc import Mapping from dataclasses import dataclass from enum import Enum +from pathlib import Path from typing import Any, ClassVar, Literal from typing_extensions import Self @@ -148,11 +149,22 @@ def load_from_dict( data: list, *, worker_id: str, + trial_directory: Path, ) -> list[Self]: - """Load a trial from a dictionary with state EXTERNAL.""" - trials: list[Self] = [] + """Load a trial from a dictionary with state EXTERNAL. + + Args: + data: A list of ImportedConfig objects to load. + worker_id: The worker id that is importing the trials. + trial_directory: The directory where trials are stored. + + Returns: + A list of Trial objects. + """ + loaded_trials: list[Self] = [] for i, imported_conf in enumerate(data): info_dict = imported_conf.result.get("info_dict") or {} + location = str(trial_directory / f"config_{imported_conf.id}") trial = cls( config=imported_conf.config, @@ -163,13 +175,13 @@ def load_from_dict( time_started=info_dict.get("time_started"), time_end=info_dict.get("time_end"), evaluation_duration=info_dict.get("evaluation_duration"), - previous_trial_id=None if i == 0 else trials[i - 1].metadata.id, + previous_trial_id=( + None if i == 0 else loaded_trials[i - 1].metadata.id + ), sampling_worker_id=worker_id, evaluating_worker_id=worker_id, - location="external", - previous_trial_location=None - if i == 0 - else trials[i - 1].metadata.location, + location=location, + previous_trial_location=None, ), report=Report( reported_as="success", @@ -183,8 +195,8 @@ def load_from_dict( ), source="imported", ) - trials.append(trial) - return trials + loaded_trials.append(trial) + return loaded_trials @property def id(self) -> str: From a2d878e7a91bc0f760597ea86045f873f2aa7c5c Mon Sep 17 00:00:00 
2001 From: Meganton Date: Fri, 21 Nov 2025 18:44:07 +0100 Subject: [PATCH 119/156] feat: Enhance pipeline space and optimizer validation and loading mechanism in NePSState --- neps/api.py | 33 ++++++++++++++++++---- neps/state/neps_state.py | 60 +++++++++++++++++++++++++++------------- 2 files changed, 69 insertions(+), 24 deletions(-) diff --git a/neps/api.py b/neps/api.py index 9d1562830..b26a7f191 100644 --- a/neps/api.py +++ b/neps/api.py @@ -39,7 +39,7 @@ logger = logging.getLogger(__name__) -def run( # noqa: C901, D417, PLR0912, PLR0913 +def run( # noqa: C901, D417, PLR0912, PLR0913, PLR0915 evaluate_pipeline: Callable[..., EvaluatePipelineReturn] | str, pipeline_space: ConfigurationSpace | PipelineSpace | None = None, *, @@ -197,10 +197,10 @@ class MySpace(PipelineSpace): the run. This is, e.g., useful when debugging a evaluate_pipeline function. evaluations_to_spend: Number of evaluations this specific call/worker should do. - ??? note "Limitation on Async mode" - Currently, there is no specific number to control number of parallel evaluations running with - the same worker, so in case you want to limit the number of parallel evaluations, - it's crucial to limit the `evaluations_to_spend` accordingly. + ??? note "Limitation on Async mode" + Currently, there is no specific number to control number of parallel evaluations running with + the same worker, so in case you want to limit the number of parallel evaluations, + it's crucial to limit the `evaluations_to_spend` accordingly. continue_until_max_evaluation_completed: If true, stop only after evaluations_to_spend have fully completed. 
In other words, @@ -369,6 +369,29 @@ def __call__( logger.info(f"Starting neps.run using root directory {root_directory}") + # Check if we're continuing an existing run and should load the optimizer from disk + root_path = Path(root_directory) + optimizer_info_path = root_path / "optimizer_info.yaml" + is_continuing_run = optimizer_info_path.exists() and not overwrite_root_directory + + # If continuing a run and optimizer is "auto" (default), load existing optimizer + # with its parameters + if is_continuing_run and optimizer == "auto": + try: + existing_optimizer_info = load_optimizer_info(root_path) + logger.info( + "Continuing optimization with existing optimizer: " + f"{existing_optimizer_info['name']}" + ) + # Use the existing optimizer with its original parameters + optimizer = ( + existing_optimizer_info["name"], + existing_optimizer_info["info"], + ) # type: ignore + except (FileNotFoundError, KeyError) as e: + # No existing optimizer found or invalid format, proceed with auto + logger.debug(f"Could not load existing optimizer info: {e}") + # Check if the pipeline_space only contains basic HPO parameters. # If yes, we convert it to a classic SearchSpace, to use with the old optimizers. # If no, we use adjust_evaluation_pipeline_for_neps_space to convert the diff --git a/neps/state/neps_state.py b/neps/state/neps_state.py index 2b2c3bf36..43bc738f6 100644 --- a/neps/state/neps_state.py +++ b/neps/state/neps_state.py @@ -649,7 +649,7 @@ def lock_and_get_current_evaluating_trials(self) -> list[Trial]: ] @classmethod - def create_or_load( # noqa: C901, PLR0912 + def create_or_load( # noqa: C901, PLR0912, PLR0915 cls, path: Path, *, @@ -672,6 +672,8 @@ def create_or_load( # noqa: C901, PLR0912 In principal, we could allow multiple optimizers to be run and share the same set of trials. + We do the same check for the pipeline space, if provided. + Args: path: The directory to create the state in. load_only: If True, only load the state and do not create a new one. 
@@ -696,6 +698,7 @@ def create_or_load( # noqa: C901, PLR0912 else: assert optimizer_info is not None assert optimizer_state is not None + assert pipeline_space is not None path.mkdir(parents=True, exist_ok=True) config_dir = path / "configs" @@ -719,8 +722,9 @@ def create_or_load( # noqa: C901, PLR0912 if not load_only and existing_info != optimizer_info: raise NePSError( "The optimizer info on disk does not match the one provided." - f"\nOn disk: {existing_info}\nProvided: {optimizer_info}" - f"\n\nLoaded the one on disk from {path}." + f"\nOn disk: {existing_info}" + f"\n Loaded from {path}." + f"\nProvided: {optimizer_info}" ) with optimizer_state_path.open("rb") as f: optimizer_state = pickle.load(f) # noqa: S301 @@ -738,22 +742,40 @@ def create_or_load( # noqa: C901, PLR0912 ) existing_space = None else: - if ( - not load_only - and pipeline_space is not None - and pickle.dumps(existing_space) != pickle.dumps(pipeline_space) - ): - # Strictly validate that pipeline spaces match - # We use pickle dumps to compare since pipeline spaces may not - # implement __eq__ - raise NePSError( - "The pipeline space on disk does not match the one" - " provided.\nPipeline space is saved at:" - f" {pipeline_space_path}\nIf you want to start a new" - " run with a different pipeline space, use a" - " different root_directory or set" - " overwrite_root_directory=True." 
- ) + if not load_only and pipeline_space is not None: + # Compare semantic attributes instead of raw pickle bytes + # This allows trivial changes like renaming the space class + from neps.space.neps_spaces.parameters import PipelineSpace as PS + + if isinstance(existing_space, PS) and isinstance( + pipeline_space, PS + ): + # Compare the actual parameter definitions + if pickle.dumps(existing_space.get_attrs()) != pickle.dumps( + pipeline_space.get_attrs() + ): + raise NePSError( + "The pipeline space parameters on disk do not match" + " those provided.\nPipeline space is saved at:" + f" {pipeline_space_path}\n\nTo continue this run:" + " either omit the pipeline_space parameter or use" + " neps.load_pipeline_space() to load the existing" + " one.\n\nTo start a new run with different" + " parameters, use a different root_directory or set" + " overwrite_root_directory=True." + ) + elif pickle.dumps(existing_space) != pickle.dumps(pipeline_space): + # Fallback for non-PipelineSpace objects (SearchSpace) + raise NePSError( + "The pipeline space on disk does not match the one" + " provided.\nPipeline space is saved at:" + f" {pipeline_space_path}\n\nTo continue this run: either" + " omit the pipeline_space parameter or use" + " neps.load_pipeline_space() to load the existing" + " one.\n\nTo start a new run with a different pipeline" + " space, use a different root_directory or set" + " overwrite_root_directory=True." 
+ ) pipeline_space = existing_space elif pipeline_space is None and not load_only: # No pipeline space on disk and none provided for a new/continued run From e51b556f566bba8e2b92cec12e260b3b9cdfb281 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 21 Nov 2025 19:14:18 +0100 Subject: [PATCH 120/156] feat: Implement equality comparison for NePSState and update tests to use PipelineSpace --- neps/state/neps_state.py | 43 ++++++++++++++++++- .../test_default_report_values.py | 21 +++++++-- .../test_error_handling_strategies.py | 21 +++++++-- .../test_save_evaluation_results.py | 1 + tests/test_runtime/test_stopping_criterion.py | 30 ++++++++++--- tests/test_runtime/test_worker_creation.py | 1 + tests/test_state/test_filebased_neps_state.py | 19 ++++++++ tests/test_state/test_neps_state.py | 4 ++ .../test_search_space_persistence.py | 21 ++++----- .../test_search_space_validation.py | 24 +++++------ 10 files changed, 149 insertions(+), 36 deletions(-) diff --git a/neps/state/neps_state.py b/neps/state/neps_state.py index 43bc738f6..91a2c7313 100644 --- a/neps/state/neps_state.py +++ b/neps/state/neps_state.py @@ -267,6 +267,47 @@ class NePSState: all_best_configs: list = field(default_factory=list) """Trajectory to the newest incbumbent""" + def __eq__(self, other: object) -> bool: + """Compare two NePSState objects for equality. + + Pipeline spaces are compared by pickle dumps to handle cases where + the class type differs after unpickling but the content is equivalent. 
+ """ + if not isinstance(other, NePSState): + return NotImplemented + + # Compare all fields except _pipeline_space + for field_name in [ + "path", + "_trial_lock", + "_trial_repo", + "_optimizer_lock", + "_optimizer_info_path", + "_optimizer_info", + "_optimizer_state_path", + "_optimizer_state", + "_pipeline_space_path", + "_err_lock", + "_shared_errors_path", + "_shared_errors", + "new_score", + "all_best_configs", + ]: + if getattr(self, field_name) != getattr(other, field_name): + return False + + # Compare pipeline spaces by pickle dumps + self_space = self._pipeline_space + other_space = other._pipeline_space + + if self_space is None and other_space is None: + return True + if self_space is None or other_space is None: + return False + + # Compare using pickle dumps - safe and handles all cases + return pickle.dumps(self_space) == pickle.dumps(other_space) + def lock_and_set_new_worker_id(self, worker_id: str | None = None) -> str: """Acquire the state lock and set a new worker id in the optimizer state. 
@@ -698,7 +739,7 @@ def create_or_load( # noqa: C901, PLR0912, PLR0915 else: assert optimizer_info is not None assert optimizer_state is not None - assert pipeline_space is not None + # TODO: assert pipeline_space is None -> optional for backward compatibility path.mkdir(parents=True, exist_ok=True) config_dir = path / "configs" diff --git a/tests/test_runtime/test_default_report_values.py b/tests/test_runtime/test_default_report_values.py index 6f2e976c3..39fd216a1 100644 --- a/tests/test_runtime/test_default_report_values.py +++ b/tests/test_runtime/test_default_report_values.py @@ -7,7 +7,7 @@ from neps.optimizers import OptimizerInfo from neps.optimizers.algorithms import random_search from neps.runtime import DefaultWorker -from neps.space import HPOFloat, SearchSpace +from neps.space.neps_spaces.parameters import Float, PipelineSpace from neps.state import ( DefaultReportValues, NePSState, @@ -21,19 +21,26 @@ @fixture def neps_state(tmp_path: Path) -> NePSState: + class TestSpace(PipelineSpace): + a = Float(0, 1) + return NePSState.create_or_load( path=tmp_path / "neps_state", optimizer_info=OptimizerInfo(name="blah", info={"nothing": "here"}), optimizer_state=OptimizationState( budget=None, seed_snapshot=SeedSnapshot.new_capture(), shared_state={} ), + pipeline_space=TestSpace(), ) def test_default_values_on_error( neps_state: NePSState, ) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(pipeline_space=TestSpace()) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues( @@ -83,7 +90,10 @@ def eval_function(*args, **kwargs) -> float: def test_default_values_on_not_specified( neps_state: NePSState, ) -> None: - optimizer = random_search(SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(TestSpace()) settings = 
WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues( @@ -131,7 +141,10 @@ def eval_function(*args, **kwargs) -> float: def test_default_value_objective_to_minimize_curve_take_objective_to_minimize_value( neps_state: NePSState, ) -> None: - optimizer = random_search(SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(TestSpace()) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues( diff --git a/tests/test_runtime/test_error_handling_strategies.py b/tests/test_runtime/test_error_handling_strategies.py index 4f6dff46a..25730c35a 100644 --- a/tests/test_runtime/test_error_handling_strategies.py +++ b/tests/test_runtime/test_error_handling_strategies.py @@ -11,7 +11,7 @@ from neps.optimizers import OptimizerInfo from neps.optimizers.algorithms import random_search from neps.runtime import DefaultWorker -from neps.space import HPOFloat, SearchSpace +from neps.space.neps_spaces.parameters import Float, PipelineSpace from neps.state import ( DefaultReportValues, NePSState, @@ -25,6 +25,9 @@ @fixture def neps_state(tmp_path: Path) -> NePSState: + class TestSpace(PipelineSpace): + a = Float(0, 1) + return NePSState.create_or_load( path=tmp_path / "neps_state", optimizer_info=OptimizerInfo(name="blah", info={"nothing": "here"}), @@ -33,6 +36,7 @@ def neps_state(tmp_path: Path) -> NePSState: seed_snapshot=SeedSnapshot.new_capture(), shared_state=None, ), + pipeline_space=TestSpace(), ) @@ -44,7 +48,10 @@ def test_worker_raises_when_error_in_self( neps_state: NePSState, on_error: OnErrorPossibilities, ) -> None: - optimizer = random_search(SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(TestSpace()) settings = WorkerSettings( on_error=on_error, # <- Highlight default_report_values=DefaultReportValues(), @@ -82,7 +89,10 @@ def eval_function(*args, 
**kwargs) -> float: def test_worker_raises_when_error_in_other_worker(neps_state: NePSState) -> None: - optimizer = random_search(SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(TestSpace()) settings = WorkerSettings( on_error=OnErrorPossibilities.RAISE_ANY_ERROR, # <- Highlight default_report_values=DefaultReportValues(), @@ -140,7 +150,10 @@ def test_worker_does_not_raise_when_error_in_other_worker( neps_state: NePSState, on_error: OnErrorPossibilities, ) -> None: - optimizer = random_search(SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(TestSpace()) settings = WorkerSettings( on_error=on_error, # <- Highlight default_report_values=DefaultReportValues(), diff --git a/tests/test_runtime/test_save_evaluation_results.py b/tests/test_runtime/test_save_evaluation_results.py index 3c5651cbc..91b37584d 100644 --- a/tests/test_runtime/test_save_evaluation_results.py +++ b/tests/test_runtime/test_save_evaluation_results.py @@ -27,6 +27,7 @@ def neps_state(tmp_path: Path) -> NePSState: optimizer_state=OptimizationState( budget=None, seed_snapshot=SeedSnapshot.new_capture(), shared_state={} ), + pipeline_space=ASpace(), ) diff --git a/tests/test_runtime/test_stopping_criterion.py b/tests/test_runtime/test_stopping_criterion.py index 85495981b..f26b25bbe 100644 --- a/tests/test_runtime/test_stopping_criterion.py +++ b/tests/test_runtime/test_stopping_criterion.py @@ -9,6 +9,7 @@ from neps.optimizers.optimizer import OptimizerInfo from neps.runtime import DefaultWorker from neps.space import HPOFloat, HPOInteger, SearchSpace +from neps.space.neps_spaces.parameters import Float, PipelineSpace from neps.state import ( DefaultReportValues, NePSState, @@ -22,6 +23,9 @@ @fixture def neps_state(tmp_path: Path) -> NePSState: + class TestSpace(PipelineSpace): + a = Float(0, 1) + return NePSState.create_or_load( path=tmp_path / "neps_state", 
optimizer_info=OptimizerInfo(name="blah", info={"nothing": "here"}), @@ -30,13 +34,17 @@ def neps_state(tmp_path: Path) -> NePSState: seed_snapshot=SeedSnapshot.new_capture(), shared_state=None, ), + pipeline_space=TestSpace(), ) def test_evaluations_to_spend_stopping_criterion( neps_state: NePSState, ) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(pipeline_space=TestSpace()) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -104,7 +112,10 @@ def eval_function(*args, **kwargs) -> float: def test_multiple_criteria_set( neps_state: NePSState, ) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(pipeline_space=TestSpace()) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -172,7 +183,10 @@ def eval_function(*args, **kwargs) -> dict: def test_include_in_progress_evaluations_towards_maximum_with_work_eval_count( neps_state: NePSState, ) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(pipeline_space=TestSpace()) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -235,7 +249,10 @@ def eval_function(*args, **kwargs) -> float: def test_worker_wallclock_time(neps_state: NePSState) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(pipeline_space=TestSpace()) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -273,7 +290,10 @@ def eval_function(*args, 
**kwargs) -> float: def test_max_worker_evaluation_time(neps_state: NePSState) -> None: - optimizer = random_search(pipeline_space=SearchSpace({"a": HPOFloat(0, 1)})) + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer = random_search(pipeline_space=TestSpace()) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), diff --git a/tests/test_runtime/test_worker_creation.py b/tests/test_runtime/test_worker_creation.py index 38e84ab3b..fbef648ae 100644 --- a/tests/test_runtime/test_worker_creation.py +++ b/tests/test_runtime/test_worker_creation.py @@ -24,6 +24,7 @@ def neps_state(tmp_path: Path) -> NePSState: optimizer_state=OptimizationState( budget=None, seed_snapshot=SeedSnapshot.new_capture(), shared_state={} ), + pipeline_space=ASpace(), ) diff --git a/tests/test_state/test_filebased_neps_state.py b/tests/test_state/test_filebased_neps_state.py index 5572abb4d..978d3db78 100644 --- a/tests/test_state/test_filebased_neps_state.py +++ b/tests/test_state/test_filebased_neps_state.py @@ -13,6 +13,7 @@ from neps.exceptions import NePSError, TrialNotFoundError from neps.optimizers import OptimizerInfo +from neps.space.neps_spaces.parameters import Float, PipelineSpace from neps.state.err_dump import ErrDump from neps.state.neps_state import NePSState from neps.state.optimizer import BudgetInfo, OptimizationState @@ -47,11 +48,15 @@ def test_create_with_new_filebased_neps_state( optimizer_info: OptimizerInfo, optimizer_state: OptimizationState, ) -> None: + class TestSpace(PipelineSpace): + a = Float(0, 1) + new_path = tmp_path / "neps_state" neps_state = NePSState.create_or_load( path=new_path, optimizer_info=optimizer_info, optimizer_state=optimizer_state, + pipeline_space=TestSpace(), ) assert neps_state.lock_and_get_optimizer_info() == optimizer_info assert neps_state.lock_and_get_optimizer_state() == optimizer_state @@ -70,11 +75,15 @@ def test_create_or_load_with_load_filebased_neps_state( 
optimizer_info: OptimizerInfo, optimizer_state: OptimizationState, ) -> None: + class TestSpace(PipelineSpace): + a = Float(0, 1) + new_path = tmp_path / "neps_state" neps_state = NePSState.create_or_load( path=new_path, optimizer_info=optimizer_info, optimizer_state=optimizer_state, + pipeline_space=TestSpace(), ) # NOTE: This isn't a defined way to do this but we should check @@ -89,6 +98,7 @@ def test_create_or_load_with_load_filebased_neps_state( path=new_path, optimizer_info=optimizer_info, optimizer_state=different_state, + pipeline_space=TestSpace(), ) assert neps_state == neps_state2 @@ -98,11 +108,15 @@ def test_load_on_existing_neps_state( optimizer_info: OptimizerInfo, optimizer_state: OptimizationState, ) -> None: + class TestSpace(PipelineSpace): + a = Float(0, 1) + new_path = tmp_path / "neps_state" neps_state = NePSState.create_or_load( path=new_path, optimizer_info=optimizer_info, optimizer_state=optimizer_state, + pipeline_space=TestSpace(), ) neps_state2 = NePSState.create_or_load(path=new_path, load_only=True) @@ -114,11 +128,15 @@ def test_new_or_load_on_existing_neps_state_with_different_optimizer_info( optimizer_info: OptimizerInfo, optimizer_state: OptimizationState, ) -> None: + class TestSpace(PipelineSpace): + a = Float(0, 1) + new_path = tmp_path / "neps_state" NePSState.create_or_load( path=new_path, optimizer_info=optimizer_info, optimizer_state=optimizer_state, + pipeline_space=TestSpace(), ) with pytest.raises(NePSError): @@ -126,4 +144,5 @@ def test_new_or_load_on_existing_neps_state_with_different_optimizer_info( path=new_path, optimizer_info=OptimizerInfo(name="randomlll", info={"e": "f"}), optimizer_state=optimizer_state, + pipeline_space=TestSpace(), ) diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index c0c95375a..49462fd00 100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -197,6 +197,9 @@ def case_neps_state_filebased( optimizer_info: 
OptimizerInfo, shared_state: dict[str, Any], ) -> NePSState: + class TestSpace(PipelineSpace): + a = Float(0, 1) + new_path = tmp_path / "neps_state" return NePSState.create_or_load( path=new_path, @@ -206,6 +209,7 @@ def case_neps_state_filebased( seed_snapshot=SeedSnapshot.new_capture(), shared_state=shared_state, ), + pipeline_space=TestSpace(), ) diff --git a/tests/test_state/test_search_space_persistence.py b/tests/test_state/test_search_space_persistence.py index 87ac110b2..b322e14e7 100644 --- a/tests/test_state/test_search_space_persistence.py +++ b/tests/test_state/test_search_space_persistence.py @@ -30,14 +30,14 @@ class SimpleSpace(PipelineSpace): c = Integer(0, 10) -class TestSpace1(PipelineSpace): +class Space1(PipelineSpace): """First test space for validation.""" x = Float(0, 10) y = Integer(1, 10) -class TestSpace2(PipelineSpace): +class Space2(PipelineSpace): """Second test space (different from TestSpace1).""" x = Float(0, 10) @@ -254,6 +254,7 @@ def test_load_optimizer_info_function(tmp_path: Path) -> None: seed_snapshot=SeedSnapshot.new_capture(), shared_state={}, ), + pipeline_space=SimpleSpace(), ) # Load using the utility function @@ -292,14 +293,14 @@ def test_import_trials_saves_search_space(tmp_path: Path) -> None: import_trials( evaluated_trials=evaluated_trials, root_directory=root_dir, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), ) # Verify the search space was saved loaded_space = load_pipeline_space(root_dir) assert loaded_space is not None # import_trials may convert PipelineSpace to SearchSpace, so check for SearchSpace - assert isinstance(loaded_space, TestSpace1 | SearchSpace) + assert isinstance(loaded_space, Space1 | SearchSpace) # Verify it has the correct parameters by checking the keys if isinstance(loaded_space, SearchSpace): assert "x" in loaded_space @@ -321,7 +322,7 @@ def test_import_trials_validates_search_space(tmp_path: Path) -> None: import_trials( evaluated_trials=evaluated_trials, 
root_directory=root_dir, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), ) # Try to import again with a different space - should raise error @@ -329,7 +330,7 @@ def test_import_trials_validates_search_space(tmp_path: Path) -> None: import_trials( evaluated_trials=evaluated_trials, root_directory=root_dir, - pipeline_space=TestSpace2(), + pipeline_space=Space2(), ) @@ -348,7 +349,7 @@ def test_import_trials_without_space_loads_from_disk(tmp_path: Path) -> None: import_trials( evaluated_trials=evaluated_trials_1, root_directory=root_dir, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), ) # Second import without providing space - should load from disk @@ -406,7 +407,7 @@ def test_import_trials_validates_provided_space_against_disk(tmp_path: Path) -> import_trials( evaluated_trials=evaluated_trials_1, root_directory=root_dir, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), ) # Second import explicitly providing the same space - should work @@ -417,7 +418,7 @@ def test_import_trials_validates_provided_space_against_disk(tmp_path: Path) -> import_trials( evaluated_trials=evaluated_trials_2, root_directory=root_dir, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), ) # Third import with different space - should fail @@ -425,7 +426,7 @@ def test_import_trials_validates_provided_space_against_disk(tmp_path: Path) -> import_trials( evaluated_trials=evaluated_trials_2, root_directory=root_dir, - pipeline_space=TestSpace2(), + pipeline_space=Space2(), ) diff --git a/tests/test_state/test_search_space_validation.py b/tests/test_state/test_search_space_validation.py index fba23c73f..89ff71c52 100644 --- a/tests/test_state/test_search_space_validation.py +++ b/tests/test_state/test_search_space_validation.py @@ -23,14 +23,14 @@ from neps.state import NePSState -class TestSpace1(PipelineSpace): +class Space1(PipelineSpace): """First test pipeline space.""" x = Float(0.0, 1.0) y = Integer(1, 10) -class TestSpace2(PipelineSpace): +class 
Space2(PipelineSpace): """Different test pipeline space.""" a = Float(0.0, 2.0) @@ -56,7 +56,7 @@ def test_error_on_mismatched_search_space(tmp_path: Path): # Create initial state with TestSpace1 neps.run( evaluate_pipeline=eval_fn1, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), root_directory=str(root_dir), evaluations_to_spend=1, ) @@ -65,7 +65,7 @@ def test_error_on_mismatched_search_space(tmp_path: Path): with pytest.raises(NePSError, match="pipeline space on disk does not match"): neps.run( evaluate_pipeline=eval_fn2, - pipeline_space=TestSpace2(), + pipeline_space=Space2(), root_directory=str(root_dir), evaluations_to_spend=2, ) @@ -78,7 +78,7 @@ def test_success_without_search_space_when_on_disk(tmp_path: Path): # Create initial state with TestSpace1 neps.run( evaluate_pipeline=eval_fn1, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), root_directory=str(root_dir), evaluations_to_spend=1, ) @@ -117,7 +117,7 @@ def test_load_only_does_not_validate(tmp_path: Path, caplog): # Create initial state neps.run( evaluate_pipeline=eval_fn1, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), root_directory=str(root_dir), evaluations_to_spend=1, ) @@ -146,7 +146,7 @@ def test_load_config_with_wrong_space_raises_error(tmp_path: Path): # Create run with TestSpace1 neps.run( evaluate_pipeline=eval_fn1, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), root_directory=str(root_dir), evaluations_to_spend=1, ) @@ -162,7 +162,7 @@ def test_load_config_with_wrong_space_raises_error(tmp_path: Path): # Try to load with wrong pipeline_space - should raise error with pytest.raises(NePSError, match="pipeline_space provided does not match"): - neps.load_config(config_path, pipeline_space=TestSpace2()) + neps.load_config(config_path, pipeline_space=Space2()) def test_load_config_without_space_auto_loads(tmp_path: Path): @@ -172,7 +172,7 @@ def test_load_config_without_space_auto_loads(tmp_path: Path): # Create run neps.run( 
evaluate_pipeline=eval_fn1, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), root_directory=str(root_dir), evaluations_to_spend=1, ) @@ -199,7 +199,7 @@ def test_ddp_runtime_loads_search_space(tmp_path: Path): # Create initial state with search space neps.run( evaluate_pipeline=eval_fn1, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), root_directory=str(root_dir), evaluations_to_spend=1, ) @@ -220,7 +220,7 @@ def test_status_without_space_works(tmp_path: Path): # Create a run neps.run( evaluate_pipeline=eval_fn1, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), root_directory=str(root_dir), evaluations_to_spend=1, ) @@ -237,7 +237,7 @@ def test_status_handles_missing_search_space_gracefully(tmp_path: Path): # Create a run neps.run( evaluate_pipeline=eval_fn1, - pipeline_space=TestSpace1(), + pipeline_space=Space1(), root_directory=str(root_dir), evaluations_to_spend=1, ) From b20bcaab563cf31d3c53d11961c524ee2c7ae5ec Mon Sep 17 00:00:00 2001 From: Nastaran Alipour <66458044+nastaran78@users.noreply.github.com> Date: Sun, 23 Nov 2025 14:18:53 +0100 Subject: [PATCH 121/156] refactor: centralize resource tracking - remove trajectory-related variables (#253) --- neps/runtime.py | 658 ++++++++---------- neps/state/neps_state.py | 9 +- neps/status/status.py | 129 ++-- .../test_trajectory_and_metrics.py | 129 ++++ 4 files changed, 493 insertions(+), 432 deletions(-) diff --git a/neps/runtime.py b/neps/runtime.py index 4118e01ad..6258e10cd 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -12,9 +12,9 @@ import shutil import time import traceback -from collections.abc import Callable, Iterator, Mapping +from collections.abc import Callable, Iterator, Mapping, Sequence from contextlib import contextmanager -from dataclasses import dataclass +from dataclasses import asdict, dataclass from pathlib import Path from typing import TYPE_CHECKING, ClassVar, Literal @@ -49,7 +49,12 @@ WorkerSettings, evaluate_trial, ) -from neps.status.status import 
_build_trace_texts, _initiate_summary_csv, status +from neps.status.status import ( + _build_incumbent_content, + _build_optimal_set_content, + _initiate_summary_csv, + status, +) from neps.utils.common import gc_disabled if TYPE_CHECKING: @@ -159,6 +164,33 @@ def _set_global_trial(trial: Trial) -> Iterator[None]: _CURRENTLY_RUNNING_TRIAL_IN_PROCESS = None +@dataclass +class ResourceUsage: + """Container for tracking cumulative resource usage.""" + + evaluations: int = 0 + cost: float = 0.0 + fidelities: float = 0.0 + time: float = 0.0 + + def __iadd__(self, other: ResourceUsage) -> ResourceUsage: + """Allows syntax: usage += other_usage.""" + self.evaluations += other.evaluations + self.cost += other.cost + self.fidelities += other.fidelities + self.time += other.time + return self + + def to_trajectory_dict(self) -> dict[str, float | int]: + """Converts usage to the dictionary keys expected by the trajectory file.""" + return { + "cumulative_evaluations": self.evaluations, + "cumulative_cost": self.cost, + "cumulative_fidelities": self.fidelities, + "cumulative_time": self.time, + } + + # NOTE: This class is quite stateful and has been split up quite a bit to make testing # interleaving of workers easier. This comes at the cost of more fragmented code. @dataclass @@ -284,181 +316,138 @@ def _check_shared_error_stopping_criterion(self) -> str | Literal[False]: return False - @staticmethod - def _get_evaluations_spent( + def _calculate_total_resource_usage( # noqa: C901 + self, trials: Mapping[str, Trial], + subset_worker_id: str | None = None, *, include_in_progress: bool = False, - ) -> int: - count_evals = sum(1 for _, trial in trials.items() if trial.report is not None) - if include_in_progress: - count_evals += sum( - 1 - for _, trial in trials.items() - if trial.metadata.state == Trial.State.EVALUATING - ) - return count_evals + ) -> ResourceUsage: + """Calculates total resources returning a typed usage object. 
- @staticmethod - def _get_evaluation_time( - trials: Mapping[str, Trial], - *, - include_in_progress: bool = False, - ) -> float: - evaluation_time = sum( - trial.report.evaluation_duration - for _, trial in trials.items() - if trial.report is not None and trial.report.evaluation_duration is not None - ) - if include_in_progress: - evaluation_time += sum( - trial.report.evaluation_duration - for _, trial in trials.items() - if trial.metadata.state == Trial.State.EVALUATING - and trial.report is not None - and trial.report.evaluation_duration is not None - ) - return evaluation_time + Args: + trials: Dictionary of trials to calculate from. + subset_worker_id: If provided, only calculates for + trials evaluated by this worker ID. + include_in_progress: Whether to include incomplete trials. + """ + relevant_trials = list(trials.values()) + if subset_worker_id is not None: + relevant_trials = [ + t + for t in relevant_trials + if t.metadata.evaluating_worker_id == subset_worker_id + ] + + fidelity_name = None + if hasattr(self.optimizer, "space"): + if isinstance(self.optimizer.space, PipelineSpace): + if self.optimizer.space.fidelity_attrs: + fidelity_name = next(iter(self.optimizer.space.fidelity_attrs.keys())) + fidelity_name = ( + f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" + ) + elif self.optimizer.space.fidelities: + fidelity_name = next(iter(self.optimizer.space.fidelities.keys())) - @staticmethod - def _get_cost_spent( - trials: Mapping[str, Trial], - *, - include_in_progress: bool = False, - ) -> float: - cost = sum( - trial.report.cost - for _, trial in trials.items() - if trial.report is not None and trial.report.cost is not None - ) - if include_in_progress: - cost += sum( - trial.report.cost - for _, trial in trials.items() - if trial.metadata.state == Trial.State.EVALUATING - and trial.report is not None - and trial.report.cost is not None - ) - return cost + usage = ResourceUsage() - @staticmethod - def _get_fidelities_spent( - trials: 
Mapping[str, Trial], - optimizer: AskFunction, - *, - include_in_progress: bool = False, - ) -> float | int: - if not hasattr(optimizer, "space"): - return 0 - if isinstance(optimizer.space, PipelineSpace): - fidelity_name = next(iter(optimizer.space.fidelity_attrs.keys())) - fidelity_name = f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" - else: - fidelity_name = next(iter(optimizer.space.fidelities.keys())) + for trial in relevant_trials: + if not ( + trial.report is not None + or ( + include_in_progress and trial.metadata.state == Trial.State.EVALUATING + ) + ): + continue + usage.evaluations += 1 + if trial.report and trial.report.cost is not None: + usage.cost += trial.report.cost + + # Handle time: either from report or calculate from metadata + if trial.report and trial.report.evaluation_duration is not None: + usage.time += trial.report.evaluation_duration + elif ( + trial.metadata.time_started is not None + and trial.metadata.time_end is not None + ): + usage.time += trial.metadata.time_end - trial.metadata.time_started - fidelities_spent = sum( - trial.config[fidelity_name] - for _, trial in trials.items() - if trial.report is not None and trial.config[fidelity_name] is not None - ) - if include_in_progress: - fidelities_spent += sum( - trial.config[fidelity_name] - for _, trial in trials.items() - if trial.metadata.state == Trial.State.EVALUATING - and trial.report is not None + if ( + fidelity_name + and fidelity_name in trial.config and trial.config[fidelity_name] is not None - ) - return fidelities_spent + ): + usage.fidelities += trial.config[fidelity_name] + + return usage def _check_global_stopping_criterion( self, trials: Mapping[str, Trial], - ) -> tuple[str | Literal[False], dict[str, float | int]]: - return_dict: dict[str, float | int] = {} + ) -> tuple[str | Literal[False], ResourceUsage]: + """Evaluates if any global stopping criterion has been met.""" + worker_resource_usage = self._calculate_total_resource_usage( + trials, + 
subset_worker_id=self.worker_id, + include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, + ) + + global_resource_usage = self._calculate_total_resource_usage( + trials, + subset_worker_id=None, + include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, + ) + return_string: str | Literal[False] = False - # worker related stopping criterion - worker_trials = { - _id: trial - for _id, trial in trials.items() - if trial.metadata.evaluating_worker_id == self.worker_id - } - if self.settings.evaluations_to_spend is not None: - count = self._get_evaluations_spent( - trials=worker_trials, - include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, - ) - cumulative_eval_count = self._get_evaluations_spent( - trials=trials, - include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, - ) - return_dict["cumulative_evaluations"] = cumulative_eval_count - if count >= self.settings.evaluations_to_spend: - return_string = ( - "Worker has reached the maximum number of evaluations it is allowed" - f" to do as given by `{self.settings.evaluations_to_spend=}`." - "\nTo allow more evaluations, increase this value or use a different" - " stopping criterion." - ) - if self.settings.fidelities_to_spend is not None: - count_fidelities = self._get_fidelities_spent( - trials=worker_trials, - optimizer=self.optimizer, - include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, - ) - cumulative_fidelities = self._get_fidelities_spent( - trials=trials, - optimizer=self.optimizer, - include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, + if ( + self.settings.evaluations_to_spend is not None + and worker_resource_usage.evaluations >= self.settings.evaluations_to_spend + ): + return_string = ( + "Worker has reached the maximum number of evaluations it is allowed" + f" to do as given by `{self.settings.evaluations_to_spend=}`." 
+ "\nTo allow more evaluations, increase this value or use a different" + " stopping criterion." ) - return_dict["cumulative_fidelities"] = cumulative_fidelities - if count_fidelities >= self.settings.fidelities_to_spend: - return_string = ( - "The total number of fidelity evaluations has reached the maximum" - f" allowed of `{self.settings.fidelities_to_spend=}`." - " To allow more evaluations, increase this value or use a different" - " stopping criterion." - ) - if self.settings.cost_to_spend is not None: - cost = self._get_cost_spent( - trials=worker_trials, - include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, - ) - cumulative_cost = self._get_cost_spent( - trials=trials, - include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, + if ( + self.settings.fidelities_to_spend is not None + and worker_resource_usage.fidelities >= self.settings.fidelities_to_spend + ): + return_string = ( + "The total number of fidelity evaluations has reached the maximum" + f" allowed of `{self.settings.fidelities_to_spend=}`." + " To allow more evaluations, increase this value or use a different" + " stopping criterion." ) - return_dict["cumulative_cost"] = cumulative_cost - if cost >= self.settings.cost_to_spend: - return_string = ( - "Worker has reached the maximum cost it is allowed to spend" - f" which is given by `{self.settings.cost_to_spend=}`." - f" This worker has spend '{cost}'." - "\n To allow more evaluations, increase this value or use a different" - " stopping criterion." 
- ) - if self.settings.max_evaluation_time_total_seconds is not None: - time_spent = self._get_evaluation_time( - trials=worker_trials, - include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, + if ( + self.settings.cost_to_spend is not None + and worker_resource_usage.cost >= self.settings.cost_to_spend + ): + return_string = ( + "Worker has reached the maximum cost it is allowed to spend" + f" which is given by `{self.settings.cost_to_spend=}`." + f" This worker has spend '{worker_resource_usage.cost}'." + "\n To allow more evaluations, increase this value or use a different" + " stopping criterion." ) - cumulative_time = self._get_evaluation_time( - trials=trials, - include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, + + if ( + self.settings.max_evaluation_time_total_seconds is not None + and worker_resource_usage.time + >= self.settings.max_evaluation_time_total_seconds + ): + return_string = ( + "The maximum evaluation time of" + f" `{self.settings.max_evaluation_time_total_seconds=}` has been" + " reached. To allow more evaluations, increase this value or use" + " a different stopping criterion." ) - return_dict["cumulative_time"] = cumulative_time - if time_spent >= self.settings.max_evaluation_time_total_seconds: - return_string = ( - "The maximum evaluation time of" - f" `{self.settings.max_evaluation_time_total_seconds=}` has been" - " reached. To allow more evaluations, increase this value or use" - " a different stopping criterion." 
- ) - return (return_string, return_dict) + return (return_string, global_resource_usage) @property def _requires_global_stopping_criterion(self) -> bool: @@ -469,7 +458,35 @@ def _requires_global_stopping_criterion(self) -> bool: or self.settings.max_evaluation_time_total_seconds is not None ) - def _get_next_trial(self) -> Trial | Literal["break"]: # noqa: PLR0915 + def _write_trajectory_files( + self, + incumbent_configs: list, + optimal_configs: list, + trace_lock: FileLock, + improvement_trace_path: Path, + best_config_path: Path, + final_stopping_criteria: ResourceUsage | None = None, + ) -> None: + """Writes the trajectory and best config files safely.""" + trace_text = _build_incumbent_content(incumbent_configs) + + best_config_text = _build_optimal_set_content(optimal_configs) + + if final_stopping_criteria: + best_config_text += "\n" + "-" * 80 + best_config_text += "\nFinal cumulative metrics (Assuming completed run):" + for metric, value in final_stopping_criteria.to_trajectory_dict().items(): + best_config_text += f"\n{metric}: {value}" + + with trace_lock: + if incumbent_configs: + with improvement_trace_path.open(mode="w") as f: + f.write(trace_text) + if optimal_configs: + with best_config_path.open(mode="w") as f: + f.write(best_config_text) + + def _get_next_trial(self) -> Trial | Literal["break"]: # If there are no global stopping criterion, we can no just return early. 
with self.state._optimizer_lock.lock(worker_id=self.worker_id): # NOTE: It's important to release the trial lock before sampling @@ -486,40 +503,10 @@ def _get_next_trial(self) -> Trial | Literal["break"]: # noqa: PLR0915 trials = self.state._trial_repo.latest() if self._requires_global_stopping_criterion: - should_stop, stop_dict = self._check_global_stopping_criterion(trials) + should_stop, stop_criteria = self._check_global_stopping_criterion( + trials + ) if should_stop is not False: - _trace_lock = FileLock(".trace.lock") - _trace_lock_path = Path(str(_trace_lock.lock_file)) - _trace_lock_path.touch(exist_ok=True) - - # Update the best_config.txt to include the final cumulative - # metrics - main_dir = Path(self.state.path) - summary_dir = main_dir / "summary" - improvement_trace_path = ( - summary_dir / "best_config_trajectory.txt" - ) - best_config_path = summary_dir / "best_config.txt" - - with _trace_lock: - trace_text, best_config_text = _build_trace_texts( - self.state.all_best_configs - ) - - # Add final cumulative metrics to the best config text - best_config_text += "\n" - best_config_text += "-" * 80 - best_config_text += ( - "\nFinal cumulative metrics (Assuming completed run):" - ) - for metric, value in stop_dict.items(): - best_config_text += f"\n{metric}: {value}" - - with improvement_trace_path.open(mode="w") as f: - f.write(trace_text) - - with best_config_path.open(mode="w") as f: - f.write(best_config_text) logger.info(should_stop) return "break" @@ -641,29 +628,6 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 summary_dir, ) - previous_trials = self.state.lock_and_read_trials() - if len(previous_trials): - self.load_incumbent_trace( - previous_trials, - _trace_lock, - self.state, - self.settings, - improvement_trace_path, - best_config_path, - self.optimizer, - ) - - # FIX: Use the actual best score from trajectory, not state.new_score - # state.new_score may have been set to the last processed trial, not the best - 
_best_score_so_far = float("inf") - if self.state.all_best_configs: - # Get the true minimum from all best configs - _best_score_so_far = min( - config["score"] for config in self.state.all_best_configs - ) - # Update state.new_score to the actual best - self.state.new_score = _best_score_so_far - optimizer_name = self.state._optimizer_info["name"] logger.info("Using optimizer: %s", optimizer_name) @@ -795,55 +759,16 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 for _key, callback in _TRIAL_END_CALLBACKS.items(): callback(trial_to_eval) - if ( - report.objective_to_minimize is not None - and report.err is None - and not isinstance(report.objective_to_minimize, list) - ): - self.state.new_score = report.objective_to_minimize - if self.state.new_score < _best_score_so_far: - _best_score_so_far = self.state.new_score - logger.info( - "New best: trial %s with objective %s", - evaluated_trial.id, - self.state.new_score, - ) - - # Store in memory for later file re-writing - global_stopping_criterion = self._check_global_stopping_criterion( - self.state._trial_repo.latest() - )[1] - - config_dict = { - "score": self.state.new_score, - "trial_id": evaluated_trial.id, - "config": evaluated_trial.config, - } - if report.cost is not None: - config_dict["cost"] = report.cost - for metric in ( - "cumulative_evaluations", - "cumulative_fidelities", - "cumulative_cost", - "cumulative_time", - ): - if metric in global_stopping_criterion: - config_dict[metric] = global_stopping_criterion[metric] - self.state.all_best_configs.append(config_dict) - - # Build trace text and best config text using shared function - trace_text, best_config_text = _build_trace_texts( - self.state.all_best_configs + if report.objective_to_minimize is not None and report.err is None: + with self.state._trial_lock.lock(): + trials = self.state._trial_repo.latest() + self.load_incumbent_trace( + trials, + _trace_lock, + improvement_trace_path, + best_config_path, ) - # Write files from scratch - 
with _trace_lock: - with improvement_trace_path.open(mode="w") as f: - f.write(trace_text) - - with best_config_path.open(mode="w") as f: - f.write(best_config_text) - full_df, short = status(main_dir) with csv_locker.lock(): full_df.to_csv(full_df_path) @@ -856,22 +781,19 @@ def run(self) -> None: # noqa: C901, PLR0912, PLR0915 "Learning Curve %s: %s", evaluated_trial.id, report.learning_curve ) - def load_incumbent_trace( # noqa: C901, PLR0912 + def load_incumbent_trace( self, - previous_trials: dict[str, Trial], + trials: dict[str, Trial], _trace_lock: FileLock, - state: NePSState, - settings: WorkerSettings, improvement_trace_path: Path, best_config_path: Path, - optimizer: AskFunction, ) -> None: """Load the incumbent trace from previous trials and update the state. - This function also computes cumulative metrics and updates the best + This function also computes cumulative resource usage and updates the best configurations. Args: - previous_trials (dict): A dictionary of previous trials. + trials (dict): A dictionary of the trials. _trace_lock (FileLock): A file lock to ensure thread-safe writing. state (NePSState): The current NePS state. settings (WorkerSettings): The worker settings. @@ -879,103 +801,71 @@ def load_incumbent_trace( # noqa: C901, PLR0912 best_config_path (Path): Path to the best configuration file. optimizer (AskFunction): The optimizer used for sampling configurations. """ - # Clear any existing entries to prevent duplicates - state.all_best_configs.clear() + if not trials: + return - _best_score_so_far = float("inf") + # Clear any existing entries to prevent duplicates and rebuild a + # non-dominated frontier from previous trials in chronological order. 
+ incumbent = [] - metrics = { - "cumulative_evaluations": 0, - "cumulative_fidelities": 0.0, - "cumulative_cost": 0.0, - "cumulative_time": 0.0, - } + running_usage = ResourceUsage() - # FIX: Sort trials chronologically to maintain trajectory monotonicity - sorted_trials = sorted( - previous_trials.values(), + sorted_trials: list[Trial] = sorted( + trials.values(), key=lambda t: ( t.metadata.time_sampled if t.metadata.time_sampled else float("inf") ), ) + evaluated_trials: list[Trial] = [ + trial + for trial in sorted_trials + if trial.report is not None and trial.report.objective_to_minimize is not None + ] + is_mo = any( + isinstance(trial.report.objective_to_minimize, list) # type: ignore[union-attr] + for trial in evaluated_trials + ) - for evaluated_trial in sorted_trials: - if ( - evaluated_trial.report is not None - and evaluated_trial.report.objective_to_minimize is not None - ): - metrics["cumulative_evaluations"] += 1 - if ( - settings.cost_to_spend is not None - and evaluated_trial.report.cost is not None - ): - metrics["cumulative_cost"] += evaluated_trial.report.cost - if ( - ( - settings.max_evaluation_time_total_seconds is not None - or evaluated_trial.metadata.evaluation_duration is not None - or settings.max_evaluation_time_total_seconds is not None - ) - and evaluated_trial.metadata.time_started is not None - and evaluated_trial.metadata.time_end is not None - ): - metrics["cumulative_time"] += ( - evaluated_trial.metadata.time_end - - evaluated_trial.metadata.time_started - ) + frontier: list[Trial] = [] + trajectory_confs: dict[str, dict[str, float | int]] = {} - if hasattr(optimizer, "space"): - fidelity_name = "" - if not isinstance(optimizer.space, PipelineSpace): - if optimizer.space.fidelities: - fidelity_name = next(iter(optimizer.space.fidelities.keys())) - elif optimizer.space.fidelity_attrs: - fidelity_name = next(iter(optimizer.space.fidelity_attrs.keys())) - fidelity_name = ( - 
f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" - ) - if ( - fidelity_name in evaluated_trial.config - and evaluated_trial.config[fidelity_name] is not None - ): - metrics["cumulative_fidelities"] += evaluated_trial.config[ - fidelity_name - ] - - if isinstance(evaluated_trial.report.objective_to_minimize, list): - # Skip list objectives for now in incumbent trace - continue - state.new_score = evaluated_trial.report.objective_to_minimize - if state.new_score is not None and state.new_score < _best_score_so_far: - _best_score_so_far = state.new_score - config_dict = { - "score": state.new_score, - "trial_id": evaluated_trial.metadata.id, - "config": evaluated_trial.config, - } - - # Add cost if available - if evaluated_trial.report.cost is not None: - config_dict["cost"] = evaluated_trial.report.cost - state.all_best_configs.append(config_dict) - - for metric in ( - "cumulative_evaluations", - "cumulative_fidelities", - "cumulative_cost", - "cumulative_time", - ): - if metrics[metric] > 0: - config_dict[metric] = metrics[metric] - - # Use the shared function to build trace texts - trace_text, best_config_text = _build_trace_texts(state.all_best_configs) - - with _trace_lock: - with improvement_trace_path.open(mode="w") as f: - f.write(trace_text) - with best_config_path.open(mode="w") as f: - f.write(best_config_text) + for evaluated_trial in evaluated_trials: + single_trial_usage = self._calculate_total_resource_usage( + {evaluated_trial.id: evaluated_trial} + ) + running_usage += single_trial_usage + + assert evaluated_trial.report is not None # for mypy + new_trial_obj = evaluated_trial.report.objective_to_minimize + + if not _is_dominated(new_trial_obj, frontier): + frontier = _prune_and_add_to_frontier(evaluated_trial, frontier) + if not is_mo: + incumbent.append(evaluated_trial) + current_snapshot = ResourceUsage(**asdict(running_usage)) + config_dict = { + "score": new_trial_obj, + "trial_id": evaluated_trial.id, + "config": 
evaluated_trial.config, + } + if evaluated_trial.report.cost is not None: + config_dict["cost"] = evaluated_trial.report.cost + + config_dict.update(current_snapshot.to_trajectory_dict()) + trajectory_confs[evaluated_trial.id] = config_dict + + optimal_configs: list[dict] = [trajectory_confs[trial.id] for trial in frontier] + incumbent_configs: list[dict] = [ + trajectory_confs[trial.id] for trial in incumbent + ] + + self._write_trajectory_files( + incumbent_configs=incumbent_configs, + optimal_configs=optimal_configs, + trace_lock=_trace_lock, + improvement_trace_path=improvement_trace_path, + best_config_path=best_config_path, + ) def _save_results( @@ -1241,3 +1131,59 @@ def _make_default_report_values( learning_curve_on_error=None, learning_curve_if_not_provided="objective_to_minimize", ) + + +def _to_sequence(score: float | Sequence[float]) -> list[float]: + """Normalize score to a list of floats for pareto comparisons. + + Scalars become single-element lists. Sequences are converted to lists. + """ + if isinstance(score, Sequence): + return [float(x) for x in score] + return [float(score)] + + +def _is_dominated(candidate: float | Sequence[float], frontier: list[Trial]) -> bool: + """Return True if `candidate` is dominated by any point in `frontier`. + + `frontier` is a list of score sequences (as lists). + """ + cand_seq = _to_sequence(candidate) + + for t in frontier: + if t.report is None: + continue + f_seq = _to_sequence(t.report.objective_to_minimize) + if len(f_seq) != len(cand_seq): + continue + if all(fi <= ci for fi, ci in zip(f_seq, cand_seq, strict=False)) and any( + fi < ci for fi, ci in zip(f_seq, cand_seq, strict=False) + ): + return True + return False + + +def _prune_and_add_to_frontier(candidate: Trial, frontier: list[Trial]) -> list[Trial]: + """Add candidate Trial to frontier and remove frontier Trials dominated by it. + + Frontier is a list of Trial objects (with reports). Returns the new frontier + as a list of Trials. 
+ """ + if candidate.report is None: + return frontier + + cand_seq = _to_sequence(candidate.report.objective_to_minimize) + new_frontier: list[Trial] = [] + for t in frontier: + if t.report is None: + continue + f_seq = _to_sequence(t.report.objective_to_minimize) + if ( + len(f_seq) == len(cand_seq) + and all(ci <= fi for ci, fi in zip(cand_seq, f_seq, strict=False)) + and any(ci < fi for ci, fi in zip(cand_seq, f_seq, strict=False)) + ): + continue + new_frontier.append(t) + new_frontier.append(candidate) + return new_frontier diff --git a/neps/state/neps_state.py b/neps/state/neps_state.py index 91a2c7313..78630bd6a 100644 --- a/neps/state/neps_state.py +++ b/neps/state/neps_state.py @@ -261,11 +261,6 @@ class NePSState: _pipeline_space: SearchSpace | PipelineSpace | None = field(repr=False, default=None) - new_score: float = float("inf") - """Tracking of the new incumbent""" - - all_best_configs: list = field(default_factory=list) - """Trajectory to the newest incbumbent""" def __eq__(self, other: object) -> bool: """Compare two NePSState objects for equality. 
@@ -290,8 +285,6 @@ def __eq__(self, other: object) -> bool: "_err_lock", "_shared_errors_path", "_shared_errors", - "new_score", - "all_best_configs", ]: if getattr(self, field_name) != getattr(other, field_name): return False @@ -335,7 +328,7 @@ def lock_and_set_new_worker_id(self, worker_id: str | None = None) -> str: ) if opt_state.worker_ids and worker_id in opt_state.worker_ids: raise NePSError( - f"Worker id '{worker_id}' already exists, " + f"Worker id '{worker_id}' already exists," f" reserved worker ids: {opt_state.worker_ids}" ) if opt_state.worker_ids is None: diff --git a/neps/status/status.py b/neps/status/status.py index 66d2ab3b0..eaf17583e 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -29,7 +29,33 @@ from neps.space.neps_spaces.parameters import PipelineSpace -def _build_trace_texts(best_configs: list[dict]) -> tuple[str, str]: +def _format_config_entry(entry: dict, indent: str = "") -> str: + """Format a single best-config entry into a text block. + + indent is a string prefixed to the first line for nicer indentation in + the `best_config_text` block. + """ + parts: list[str] = [] + parts.append(f"{indent}Config ID: {entry['trial_id']}") + parts.append(f"Objective to minimize: {entry['score']}") + if "cost" in entry: + parts.append(f"Cost: {entry['cost']}") + + if "cumulative_evaluations" in entry: + parts.append(f"Cumulative evaluations: {entry['cumulative_evaluations']}") + if "cumulative_fidelities" in entry: + parts.append(f"Cumulative fidelities: {entry['cumulative_fidelities']}") + if "cumulative_cost" in entry: + parts.append(f"Cumulative cost: {entry['cumulative_cost']}") + if "cumulative_time" in entry: + parts.append(f"Cumulative time: {entry['cumulative_time']}") + + parts.append(f"Config: {entry['config']}") + + return "\n".join(parts) + "\n" + ("-" * 80) + "\n" + + +def _build_incumbent_content(best_configs: list[dict]) -> str: """Build trace text and best config text from a list of best configurations. 
Args: @@ -39,72 +65,31 @@ def _build_trace_texts(best_configs: list[dict]) -> tuple[str, str]: Returns: Tuple of (trace_text, best_config_text) strings. """ - trace_text = ( + trace_content = ( "Best configs and their objectives across evaluations:\n" + "-" * 80 + "\n" ) - for best in best_configs: - trace_text += ( - f"Config ID: {best['trial_id']}\nObjective to minimize: {best['score']}\n" - + (f"Cost: {best.get('cost', 0)}\n" if "cost" in best else "") - + ( - f"Cumulative evaluations: {best.get('cumulative_evaluations', 0)}\n" - if "cumulative_evaluations" in best - else "" - ) - + ( - f"Cumulative fidelities: {best.get('cumulative_fidelities', 0)}\n" - if "cumulative_fidelities" in best - else "" - ) - + ( - f"Cumulative cost: {best.get('cumulative_cost', 0)}\n" - if "cumulative_cost" in best - else "" - ) - + ( - f"Cumulative time: {best.get('cumulative_time', 0)}\n" - if "cumulative_time" in best - else "" - ) - + f"Config: {best['config']}\n" - + "-" * 80 - + "\n" - ) + trace_content += _format_config_entry(best) - best_config_text = "" - if best_configs: - # FIX: Find the actual best config by minimum score, not just the last one - best_config = min(best_configs, key=lambda c: c["score"]) - best_config_text = ( - "# Best config:" - f"\n\n Config ID: {best_config['trial_id']}" - f"\n Objective to minimize: {best_config['score']}" - + (f"\n Cost: {best_config['cost']}" if "cost" in best_config else "") - + ( - f"\n Cumulative evaluations: {best_config['cumulative_evaluations']}" - if "cumulative_evaluations" in best_config - else "" - ) - + ( - f"\n Cumulative fidelities: {best_config['cumulative_fidelities']}" - if "cumulative_fidelities" in best_config - else "" - ) - + ( - f"\n Cumulative cost: {best_config['cumulative_cost']}" - if "cumulative_cost" in best_config - else "" - ) - + ( - f"\n Cumulative time: {best_config['cumulative_time']}" - if "cumulative_time" in best_config - else "" - ) - + f"\n Config: {best_config['config']}" - ) + return 
trace_content + + +def _build_optimal_set_content(best_configs: list[dict]) -> str: + """Build trace text and best config text from a list of best configurations. - return trace_text, best_config_text + Args: + best_configs: List of best configuration dictionaries containing + 'trial_id', 'score', 'config', and optional metrics. + + Returns: + content: str. + """ + trace_text = ( + "Best configs and their objectives across evaluations:\n" + "-" * 80 + "\n" + ) + for best in best_configs: + trace_text += _format_config_entry(best) + return trace_text @dataclass @@ -297,11 +282,6 @@ def formatted( # noqa: PLR0912, C901 def from_directory(cls, root_directory: str | Path) -> Summary: """Create a summary from a neps run directory.""" root_directory = Path(root_directory) - - is_multiobjective: bool = False - best: tuple[Trial, float] | None = None - by_state: dict[State, list[Trial]] = {s: [] for s in State} - # NOTE: We don't lock the shared state since we are just reading and don't need to # make decisions based on the state try: @@ -313,7 +293,20 @@ def from_directory(cls, root_directory: str | Path) -> Summary: trials = shared_state.lock_and_read_trials() - for _trial_id, trial in trials.items(): + return cls.from_trials(trials) + + @classmethod + def from_trials(cls, trials: dict[str, Trial]) -> Summary: + """Summarize a mapping of trials into (by_state, is_multiobjective, best). + + This extracts the core loop from `Summary.from_directory` so callers that + already have a `trials` mapping can reuse the logic without re-reading state. 
+ """ + is_multiobjective: bool = False + best: tuple[Trial, float] | None = None + by_state: dict[State, list[Trial]] = {s: [] for s in State} + + for trial in trials.values(): state = trial.metadata.state by_state[state].append(trial) diff --git a/tests/test_runtime/test_trajectory_and_metrics.py b/tests/test_runtime/test_trajectory_and_metrics.py index d77b99edb..b82a1ca81 100644 --- a/tests/test_runtime/test_trajectory_and_metrics.py +++ b/tests/test_runtime/test_trajectory_and_metrics.py @@ -4,18 +4,34 @@ import re import tempfile +import time from pathlib import Path import pytest +from filelock import FileLock import neps from neps.optimizers import algorithms +from neps.runtime import DefaultWorker from neps.space.neps_spaces.parameters import ( Fidelity, Float, Integer, PipelineSpace, ) +from neps.state.neps_state import NePSState +from neps.state.optimizer import OptimizationState +from neps.state.seed_snapshot import SeedSnapshot +from neps.state.settings import ( + DefaultReportValues, + OnErrorPossibilities, + WorkerSettings, +) +from neps.state.trial import ( + MetaData, + State as TrialState, + Trial, +) class SimpleSpace(PipelineSpace): @@ -672,3 +688,116 @@ def test_continue_finished_run_with_higher_budget(run_number): # Count config entries in trajectory (though trajectory only shows improvements) config_count = final_trajectory.count("Config ID:") assert config_count >= 1, "Should have at least one config entry" + + +def test_best_config_multiobjective_frontier(): + """Test that best_config.txt for a multi-objective run contains both + Pareto-optimal configurations (two entries) when two non-dominated + objective vectors are present. 
+ """ + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "mo_test" + + # Create an empty NePS state (optimizer info is minimal) + optimizer_info = {"name": "test_opt", "info": {}} + state = NePSState.create_or_load( + path=root_directory, + optimizer_info=optimizer_info, + optimizer_state=OptimizationState( + budget=None, + seed_snapshot=SeedSnapshot.new_capture(), + shared_state={}, + ), + ) + + # Build two completed trials with multi-objective vectors (non-dominated) + t1_meta = MetaData( + id="t1", + location=str(state._trial_repo.directory / "config_t1"), + state=TrialState.SUCCESS, + previous_trial_id=None, + previous_trial_location=None, + sampling_worker_id="external", + time_sampled=1.0, + ) + t1 = Trial(config={"which": 0}, metadata=t1_meta, report=None) + r1 = t1.set_complete( + report_as="success", + time_end=time.time(), + objective_to_minimize=[1.0, 0], + cost=None, + learning_curve=None, + err=None, + tb=None, + extra={}, + evaluation_duration=0.0, + ) + t1.report = r1 + + t2_meta = MetaData( + id="t2", + location=str(state._trial_repo.directory / "config_t2"), + state=TrialState.SUCCESS, + previous_trial_id=None, + previous_trial_location=None, + sampling_worker_id="external", + time_sampled=2.0, + ) + t2 = Trial(config={"which": 1}, metadata=t2_meta, report=None) + r2 = t2.set_complete( + report_as="success", + time_end=time.time(), + objective_to_minimize=[0, 1], + cost=None, + learning_curve=None, + err=None, + tb=None, + extra={}, + evaluation_duration=0.0, + ) + t2.report = r2 + + trials = {t1.id: t1, t2.id: t2} + + # Prepare summary paths and lock + summary_dir = root_directory / "summary" + summary_dir.mkdir(parents=True, exist_ok=True) + improvement_trace_path = summary_dir / "best_config_trajectory.txt" + best_config_path = summary_dir / "best_config.txt" + improvement_trace_path.touch() + best_config_path.touch() + + trace_lock = FileLock(str(root_directory / ".trace.lock")) + + # Create a minimal worker 
(optimizer/eval fn not used by load_incumbent_trace) + settings = WorkerSettings( + on_error=OnErrorPossibilities.IGNORE, + default_report_values=DefaultReportValues(), + batch_size=None, + evaluations_to_spend=None, + include_in_progress_evaluations_towards_maximum=False, + cost_to_spend=None, + fidelities_to_spend=None, + max_evaluation_time_total_seconds=None, + max_wallclock_time_seconds=None, + ) + + worker = DefaultWorker( + state=state, + settings=settings, + evaluation_fn=dict, + optimizer=lambda: None, + worker_id="w_external", + ) + + # Call the function that writes best_config for the given trials + worker.load_incumbent_trace( + trials, trace_lock, improvement_trace_path, best_config_path + ) + + content = best_config_path.read_text() + + # Should contain two Config ID entries for the two non-dominated solutions + assert content.count("Config ID:") == 2 + assert "t1" in content + assert "t2" in content From 42f8ed2518f916431143fadacfe400048ac4210b Mon Sep 17 00:00:00 2001 From: Nastaran Alipour Date: Sun, 23 Nov 2025 14:26:06 +0100 Subject: [PATCH 122/156] fix linter --- neps/state/neps_state.py | 1 - 1 file changed, 1 deletion(-) diff --git a/neps/state/neps_state.py b/neps/state/neps_state.py index 78630bd6a..63806540b 100644 --- a/neps/state/neps_state.py +++ b/neps/state/neps_state.py @@ -261,7 +261,6 @@ class NePSState: _pipeline_space: SearchSpace | PipelineSpace | None = field(repr=False, default=None) - def __eq__(self, other: object) -> bool: """Compare two NePSState objects for equality. 
From 6780d4944f52a0a6656f735fac1c5e305a774e79 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 24 Nov 2025 16:15:30 +0100 Subject: [PATCH 123/156] feat: Enhance SearchSpace and PipelineSpace compatibility with deprecation warnings Add backward compatibility check --- neps/api.py | 29 +- neps/space/neps_spaces/parameters.py | 22 ++ neps/space/parsing.py | 27 +- neps/space/search_space.py | 99 +++++- .../test_backward_compatibility.py | 302 ++++++++++++++++++ 5 files changed, 462 insertions(+), 17 deletions(-) create mode 100644 tests/test_neps_space/test_backward_compatibility.py diff --git a/neps/api.py b/neps/api.py index b26a7f191..3e4bdb58c 100644 --- a/neps/api.py +++ b/neps/api.py @@ -14,6 +14,7 @@ from neps.normalization import _normalize_imported_config from neps.optimizers import AskFunction, OptimizerChoice, OptimizerInfo, load_optimizer from neps.runtime import _launch_runtime, _save_results +from neps.space import SearchSpace from neps.space.neps_spaces.neps_space import ( adjust_evaluation_pipeline_for_neps_space, check_neps_space_compatibility, @@ -33,7 +34,6 @@ from ConfigSpace import ConfigurationSpace from neps.optimizers.algorithms import CustomOptimizer - from neps.space import SearchSpace from neps.state.pipeline_eval import EvaluatePipelineReturn, UserResultDict logger = logging.getLogger(__name__) @@ -41,7 +41,7 @@ def run( # noqa: C901, D417, PLR0912, PLR0913, PLR0915 evaluate_pipeline: Callable[..., EvaluatePipelineReturn] | str, - pipeline_space: ConfigurationSpace | PipelineSpace | None = None, + pipeline_space: ConfigurationSpace | PipelineSpace | SearchSpace | dict | None = None, *, root_directory: str | Path = "neps_results", overwrite_root_directory: bool = False, @@ -150,8 +150,7 @@ class MySpace(PipelineSpace): This most direct way to specify the pipeline space is as follows: ```python - MySpace(PipelineSpace): - dataset = "mnist" # constant + class MySpace(PipelineSpace): nlayers = neps.Integer(2,10) # integer alpha = 
neps.Float(0.1, 1.0) # float optimizer = neps.Categorical( # categorical @@ -332,6 +331,24 @@ def __call__( "`evaluations_to_spend` for limiting the number of evaluations for this run.", ) + # If the pipeline_space is a SearchSpace, convert it to a PipelineSpace and throw a + # deprecation warning + if isinstance(pipeline_space, SearchSpace | dict): + if isinstance(pipeline_space, dict): + pipeline_space = SearchSpace(pipeline_space) + pipeline_space = convert_classic_to_neps_search_space(pipeline_space) + space_lines = str(pipeline_space).split("\n") + space_def = space_lines[1] if len(space_lines) > 1 else str(pipeline_space) + warnings.warn( + "Passing a SearchSpace or dictionary to neps.run is deprecated and will be" + " removed in a future version. Please pass a PipelineSpace instead, as" + " described in the NePS-Spaces documentation." + " This specific space should be given as:\n\n```python\nclass" + f" MySpace(PipelineSpace):\n{space_def}\n```\n", + DeprecationWarning, + stacklevel=2, + ) + # Try to load pipeline_space from disk if not provided if pipeline_space is None: root_path = Path(root_directory) @@ -532,7 +549,7 @@ def save_pipeline_results( def import_trials( # noqa: C901 evaluated_trials: Sequence[tuple[Mapping[str, Any], UserResultDict],], root_directory: Path | str, - pipeline_space: SearchSpace | PipelineSpace | None = None, + pipeline_space: SearchSpace | dict | PipelineSpace | None = None, overwrite_root_directory: bool = False, # noqa: FBT001, FBT002 optimizer: ( OptimizerChoice @@ -555,7 +572,7 @@ def import_trials( # noqa: C901 A sequence of tuples, each containing a configuration dictionary and its corresponding result. root_directory (Path or str): The root directory of the NePS run. - pipeline_space (SearchSpace | PipelineSpace | None): The pipeline space + pipeline_space (SearchSpace | dict | PipelineSpace | None): The pipeline space used for the optimization. If None, will attempt to load from the root_directory. 
If provided and a pipeline space exists on disk, they will be validated to match. diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index a1f94e07b..2c859a825 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -936,6 +936,7 @@ def __init__( prior_confidence: ( Literal["low", "medium", "high"] | ConfidenceLevel | _Unset ) = _UNSET, + **kwargs: object, ): """Initialize the Float domain with min and max values, and optional prior. @@ -945,8 +946,18 @@ def __init__( log: Whether to sample values on a logarithmic scale. prior: The prior value for the domain, if any. prior_confidence: The confidence level of the prior value. + **kwargs: Additional keyword arguments (e.g., is_fidelity) are accepted + for backward compatibility but ignored in PipelineSpace parameters. """ + # Handle is_fidelity for backward compatibility + # Store it silently - user will get warning from neps.run about SearchSpace usage + # TODO: Remove this when removing SearchSpace support + if "is_fidelity" in kwargs: + self._is_fidelity_compat = bool(kwargs.get("is_fidelity", False)) + if any(key != "is_fidelity" for key in kwargs): + raise TypeError(f"Unexpected keyword arguments: {', '.join(kwargs.keys())}.") + self._lower = lower self._upper = upper self._log = log @@ -1154,6 +1165,7 @@ def __init__( prior_confidence: ( Literal["low", "medium", "high"] | ConfidenceLevel | _Unset ) = _UNSET, + **kwargs: object, ): """Initialize the Integer domain with min and max values, and optional prior. @@ -1163,7 +1175,17 @@ def __init__( log: Whether to sample values on a logarithmic scale. prior: The prior value for the domain, if any. prior_confidence: The confidence level of the prior value. + **kwargs: Additional keyword arguments (e.g., is_fidelity) are accepted + for backward compatibility but ignored in PipelineSpace parameters. 
""" + # Handle is_fidelity for backward compatibility + # Store it silently - user will get warning from neps.run about SearchSpace usage + # TODO: Remove this when removing SearchSpace support + if "is_fidelity" in kwargs: + self._is_fidelity_compat = bool(kwargs.get("is_fidelity", False)) + if any(key != "is_fidelity" for key in kwargs): + raise TypeError(f"Unexpected keyword arguments: {', '.join(kwargs.keys())}.") + self._lower = lower self._upper = upper self._log = log diff --git a/neps/space/parsing.py b/neps/space/parsing.py index 2648a6faa..ef088ca21 100644 --- a/neps/space/parsing.py +++ b/neps/space/parsing.py @@ -9,7 +9,12 @@ from collections.abc import Mapping, Sequence from typing import TYPE_CHECKING, Any, TypeAlias -from neps.space.neps_spaces.parameters import PipelineSpace +from neps.space.neps_spaces.parameters import ( + Categorical, + Float, + Integer, + PipelineSpace, +) from neps.space.parameters import ( HPOCategorical, HPOConstant, @@ -202,6 +207,9 @@ def convert_mapping(pipeline_space: Mapping[str, Any]) -> SearchSpace: match details: case HPOFloat() | HPOInteger() | HPOCategorical() | HPOConstant(): parameters[name] = dataclasses.replace(details) # copy + # New PipelineSpace parameters - converted by SearchSpace.__post_init__ + case Float() | Integer() | Categorical(): + parameters[name] = details # type: ignore[assignment] case str() | int() | float() | Mapping(): try: parameters[name] = as_parameter(details) @@ -217,7 +225,7 @@ def convert_mapping(pipeline_space: Mapping[str, Any]) -> SearchSpace: return SearchSpace(parameters) -def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: # noqa: C901 +def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: """Constructs a [`SearchSpace`][neps.space.SearchSpace] from a [`ConfigurationSpace`](https://automl.github.io/ConfigSpace/latest/). 
@@ -230,12 +238,15 @@ def convert_configspace(configspace: ConfigurationSpace) -> SearchSpace: # noqa import ConfigSpace as CS space: dict[str, Parameter | HPOConstant] = {} - if hasattr(configspace, "conditions") and hasattr(configspace, "forbidden_clauses"): # noqa: SIM102 - if any(configspace.conditions) or any(configspace.forbidden_clauses): - raise NotImplementedError( - "The ConfigurationSpace has conditions or forbidden clauses, " - "which are not supported by neps." - ) + if ( + hasattr(configspace, "conditions") + and hasattr(configspace, "forbidden_clauses") + and (any(configspace.conditions) or any(configspace.forbidden_clauses)) + ): + raise NotImplementedError( + "The ConfigurationSpace has conditions or forbidden clauses, " + "which are not supported by neps." + ) for name, hyperparameter in configspace.items(): match hyperparameter: diff --git a/neps/space/search_space.py b/neps/space/search_space.py index 3d727a535..3f4a4dcf0 100644 --- a/neps/space/search_space.py +++ b/neps/space/search_space.py @@ -3,12 +3,20 @@ any fidelities and constants. """ +# mypy: disable-error-code="unreachable" + from __future__ import annotations from collections.abc import Iterator, Mapping from dataclasses import dataclass, field from typing import Any +from neps.space.neps_spaces.parameters import ( + Categorical as PSCategorical, + ConfidenceLevel, + Float as PSFloat, + Integer as PSInteger, +) from neps.space.parameters import ( HPOCategorical, HPOConstant, @@ -22,7 +30,9 @@ # can check if we accidetally mutate these as we pass the parameters around. # We really should not, and instead make a copy if we really need to. 
@dataclass -class SearchSpace(Mapping[str, Parameter | HPOConstant]): +class SearchSpace( + Mapping[str, Parameter | HPOConstant | PSCategorical | PSFloat | PSInteger] +): """A container for parameters.""" elements: Mapping[str, Parameter | HPOConstant] = field(default_factory=dict) @@ -63,9 +73,92 @@ def fidelity(self) -> tuple[str, HPOFloat | HPOInteger] | None: """The fidelity parameter for the search space.""" return None if len(self.fidelities) == 0 else next(iter(self.fidelities.items())) - def __post_init__(self) -> None: + def __post_init__(self) -> None: # noqa: C901, PLR0912, PLR0915 + # Convert new PipelineSpace parameters to HPO equivalents if needed + converted_elements = {} + for name, hp in self.elements.items(): + # Check if it's a new PipelineSpace parameter + if isinstance(hp, PSFloat | PSInteger | PSCategorical): + # The user will get a warning from neps.run about using SearchSpace + if isinstance(hp, PSFloat): + # Extract prior if it exists + prior: float | None = None + if hp.has_prior and hp.prior is not None: + prior = float(hp.prior) + # Get string value from ConfidenceLevel enum + prior_confidence: str = "low" + if hp.has_prior: + conf = hp.prior_confidence + prior_confidence = ( + conf.value if isinstance(conf, ConfidenceLevel) else conf + ) + converted_elements[name] = HPOFloat( + lower=float(hp.lower), + upper=float(hp.upper), + log=hp.log, + prior=prior, + prior_confidence=prior_confidence, # type: ignore[arg-type] + is_fidelity=getattr(hp, "_is_fidelity_compat", False), + ) + elif isinstance(hp, PSInteger): + # Extract prior if it exists + prior_int: int | None = None + if hp.has_prior and hp.prior is not None: + prior_int = int(hp.prior) + # Get string value from ConfidenceLevel enum + prior_confidence_int: str = "low" + if hp.has_prior: + conf = hp.prior_confidence + prior_confidence_int = ( + conf.value if isinstance(conf, ConfidenceLevel) else conf + ) + converted_elements[name] = HPOInteger( + lower=int(hp.lower), + 
upper=int(hp.upper), + log=hp.log, + prior=prior_int, + prior_confidence=prior_confidence_int, # type: ignore[arg-type] + is_fidelity=getattr(hp, "_is_fidelity_compat", False), + ) + elif isinstance(hp, PSCategorical): + # Categorical conversion - extract choices as list + # For SearchSpace, choices should be simple list[float | int | str] + if isinstance(hp.choices, tuple): + choices: list[float | int | str] = list(hp.choices) # type: ignore[arg-type] + else: + # If it's a Domain or complex structure, we can't easily convert + # Just try to use it as-is and let HPOCategorical validate + choices = list(hp.choices) # type: ignore[arg-type, assignment] + + # Extract prior if it exists + # In PipelineSpace, prior is index; in SearchSpace, actual value + prior_cat: float | int | str | None = None + if ( + hp.has_prior + and isinstance(hp.prior, int) + and 0 <= hp.prior < len(choices) + ): + # Convert index to actual choice + prior_cat = choices[hp.prior] # type: ignore[assignment] + + # Get string value from ConfidenceLevel enum + prior_confidence_cat: str = "low" + if hp.has_prior: + conf = hp.prior_confidence + prior_confidence_cat = ( + conf.value if isinstance(conf, ConfidenceLevel) else conf + ) + + converted_elements[name] = HPOCategorical( + choices=choices, + prior=prior_cat, + prior_confidence=prior_confidence_cat, # type: ignore[arg-type] + ) + else: + converted_elements[name] = hp + # Ensure that we have a consistent order for all our items. 
- self.elements = dict(sorted(self.elements.items(), key=lambda x: x[0])) + self.elements = dict(sorted(converted_elements.items(), key=lambda x: x[0])) fidelities: dict[str, HPOFloat | HPOInteger] = {} numerical: dict[str, HPOFloat | HPOInteger] = {} diff --git a/tests/test_neps_space/test_backward_compatibility.py b/tests/test_neps_space/test_backward_compatibility.py new file mode 100644 index 000000000..97bf5c14c --- /dev/null +++ b/tests/test_neps_space/test_backward_compatibility.py @@ -0,0 +1,302 @@ +"""Test backward compatibility with old SearchSpace and dict-based spaces.""" + +from __future__ import annotations + +import tempfile +import warnings +from pathlib import Path + +import pytest + +import neps +from neps.optimizers import algorithms +from neps.space import HPOCategorical, HPOFloat, HPOInteger, SearchSpace +from neps.space.neps_spaces.parameters import ( + Categorical, + Float, + Integer, + PipelineSpace, +) + + +def simple_evaluation(learning_rate: float, num_layers: int, optimizer: str) -> float: + """Simple evaluation function.""" + return learning_rate * num_layers + (0.1 if optimizer == "adam" else 0.2) + + +def test_searchspace_with_hpo_parameters(): + """Test SearchSpace with old HPO* parameters still works.""" + pipeline_space = SearchSpace( + { + "learning_rate": HPOFloat(1e-4, 1e-1, log=True), + "num_layers": HPOInteger(1, 10), + "optimizer": HPOCategorical(["adam", "sgd", "rmsprop"]), + } + ) + + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "searchspace_hpo_test" + + # Should warn about using SearchSpace instead of PipelineSpace + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=pipeline_space, + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Should get deprecation warning about SearchSpace + assert 
any( + issubclass(warning.category, DeprecationWarning) + and "SearchSpace" in str(warning.message) + for warning in w + ), "Should warn about using SearchSpace" + + assert root_directory.exists() + + +def test_searchspace_with_new_parameters(): + """Test SearchSpace with new PipelineSpace parameters (Float, Integer, Categorical).""" + pipeline_space = SearchSpace( + { + "learning_rate": Float(1e-4, 1e-1, log=True), + "num_layers": Integer(1, 10), + "optimizer": Categorical(["adam", "sgd", "rmsprop"]), + } + ) + + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "searchspace_new_test" + + # Should warn about using SearchSpace instead of PipelineSpace + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=pipeline_space, + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Should get deprecation warning about SearchSpace + assert any( + issubclass(warning.category, DeprecationWarning) + and "SearchSpace" in str(warning.message) + for warning in w + ), "Should warn about using SearchSpace" + + assert root_directory.exists() + + +def test_dict_with_hpo_parameters(): + """Test dict-based space with old HPO* parameters still works.""" + pipeline_space = { + "learning_rate": HPOFloat(1e-4, 1e-1, log=True), + "num_layers": HPOInteger(1, 10), + "optimizer": HPOCategorical(["adam", "sgd", "rmsprop"]), + } + + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "dict_hpo_test" + + # Should warn about using dict instead of PipelineSpace + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=pipeline_space, + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=3, + 
overwrite_root_directory=True, + ) + + # Should get deprecation warning about dict + assert any( + issubclass(warning.category, DeprecationWarning) + and "dictionary" in str(warning.message).lower() + for warning in w + ), "Should warn about using dict" + + assert root_directory.exists() + + +def test_dict_with_new_parameters(): + """Test dict-based space with new PipelineSpace parameters (Float, Integer, Categorical).""" + pipeline_space = { + "learning_rate": Float(1e-4, 1e-1, log=True), + "num_layers": Integer(1, 10), + "optimizer": Categorical(["adam", "sgd", "rmsprop"]), + } + + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "dict_new_test" + + # Should warn about using dict instead of PipelineSpace + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=pipeline_space, + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Should get deprecation warning about dict + assert any( + issubclass(warning.category, DeprecationWarning) + and "dictionary" in str(warning.message).lower() + for warning in w + ), "Should warn about using dict" + + assert root_directory.exists() + + +def test_searchspace_with_is_fidelity(): + """Test SearchSpace with is_fidelity parameter (old style) still works.""" + pipeline_space = SearchSpace( + { + "learning_rate": Float(1e-4, 1e-1, log=True), + "num_layers": Integer(1, 10), + "optimizer": Categorical(["adam", "sgd", "rmsprop"]), + "epochs": Integer(1, 100, is_fidelity=True), + } + ) + + def fidelity_evaluation( + learning_rate: float, + num_layers: int, + optimizer: str, + epochs: int, + ) -> float: + """Evaluation with fidelity.""" + return learning_rate * num_layers * epochs / 100 + + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "searchspace_fidelity_test" + + # Should 
work without errors (just warn about SearchSpace) + # Use neps_hyperband which supports fidelities + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + neps.run( + evaluate_pipeline=fidelity_evaluation, + pipeline_space=pipeline_space, + optimizer=algorithms.neps_hyperband, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Should get deprecation warning about SearchSpace + assert any( + issubclass(warning.category, DeprecationWarning) + and "SearchSpace" in str(warning.message) + for warning in w + ), "Should warn about using SearchSpace" + + assert root_directory.exists() + + +def test_dict_with_is_fidelity(): + """Test dict-based space with is_fidelity parameter (old style) still works.""" + pipeline_space = { + "learning_rate": Float(1e-4, 1e-1, log=True), + "num_layers": Integer(1, 10), + "optimizer": Categorical(["adam", "sgd", "rmsprop"]), + "epochs": Integer(1, 100, is_fidelity=True), + } + + def fidelity_evaluation( + learning_rate: float, + num_layers: int, + optimizer: str, + epochs: int, + ) -> float: + """Evaluation with fidelity.""" + return learning_rate * num_layers * epochs / 100 + + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "dict_fidelity_test" + + # Should work without errors (just warn about dict) + # Use neps_hyperband which supports fidelities + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + neps.run( + evaluate_pipeline=fidelity_evaluation, + pipeline_space=pipeline_space, + optimizer=algorithms.neps_hyperband, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Should get deprecation warning about dict + assert any( + issubclass(warning.category, DeprecationWarning) + and "dictionary" in str(warning.message).lower() + for warning in w + ), "Should warn about using dict" + + assert root_directory.exists() + + +def 
test_proper_pipelinespace_no_warnings(): + """Test that using proper PipelineSpace class doesn't trigger warnings.""" + + class TestSpace(PipelineSpace): + learning_rate = Float(1e-4, 1e-1, log=True) + num_layers = Integer(1, 10) + optimizer = Categorical(["adam", "sgd", "rmsprop"]) + + with tempfile.TemporaryDirectory() as tmp_dir: + root_directory = Path(tmp_dir) / "pipelinespace_test" + + # Should NOT warn when using proper PipelineSpace + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + neps.run( + evaluate_pipeline=simple_evaluation, + pipeline_space=TestSpace(), + optimizer=algorithms.neps_random_search, + root_directory=str(root_directory), + evaluations_to_spend=3, + overwrite_root_directory=True, + ) + + # Should NOT get deprecation warning about SearchSpace or dict + deprecation_warnings = [ + warning + for warning in w + if issubclass(warning.category, DeprecationWarning) + and ( + "SearchSpace" in str(warning.message) + or "dictionary" in str(warning.message).lower() + ) + ] + assert len(deprecation_warnings) == 0, ( + "Should not warn when using proper PipelineSpace, " + f"but got: {[str(w.message) for w in deprecation_warnings]}" + ) + + assert root_directory.exists() + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) From 849ba1de635839c210d340cf6f486fdd9a656156 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 25 Nov 2025 18:00:23 +0100 Subject: [PATCH 124/156] feat: Add deprecation warnings for 'is_fidelity' argument in Float and Integer classes --- neps/space/neps_spaces/parameters.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 2c859a825..918f1a1ea 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -10,6 +10,7 @@ import enum import math import random +import warnings from collections.abc import Callable, Mapping, Sequence from typing import ( Any, @@ 
-954,6 +955,12 @@ def __init__( # Store it silently - user will get warning from neps.run about SearchSpace usage # TODO: Remove this when removing SearchSpace support if "is_fidelity" in kwargs: + warnings.warn( + "`is_fidelity` argument is deprecated and will be removed in future" + " versions. Please update your code accordingly.", + DeprecationWarning, + stacklevel=2, + ) self._is_fidelity_compat = bool(kwargs.get("is_fidelity", False)) if any(key != "is_fidelity" for key in kwargs): raise TypeError(f"Unexpected keyword arguments: {', '.join(kwargs.keys())}.") @@ -1182,6 +1189,12 @@ def __init__( # Store it silently - user will get warning from neps.run about SearchSpace usage # TODO: Remove this when removing SearchSpace support if "is_fidelity" in kwargs: + warnings.warn( + "`is_fidelity` argument is deprecated and will be removed in future" + " versions. Please update your code accordingly.", + DeprecationWarning, + stacklevel=2, + ) self._is_fidelity_compat = bool(kwargs.get("is_fidelity", False)) if any(key != "is_fidelity" for key in kwargs): raise TypeError(f"Unexpected keyword arguments: {', '.join(kwargs.keys())}.") From a6963f75820f1971a76149ddbcbbe9cc593a4ce7 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 25 Nov 2025 23:52:40 +0100 Subject: [PATCH 125/156] Add comprehensive tests for operation_formatter and update related tests - Introduced a new test suite for the operation_formatter module, covering various scenarios including simple operations, nested operations, and complex structures. - Updated existing tests in search_space modules to utilize the new operation_formatter for string conversion. - Removed hardcoded expected strings in favor of checking for essential elements in the output, accommodating the new expanded formatting style. - Ensured that tests validate the presence of key operations and parameters rather than exact string matches, reflecting the changes in output formatting. 
--- .gitignore | 1 + neps/space/neps_spaces/config_string.py | 410 +-------------- neps/space/neps_spaces/neps_space.py | 95 +--- neps/space/neps_spaces/operation_formatter.py | 295 +++++++++++ neps/space/neps_spaces/parameters.py | 6 +- neps/status/status.py | 48 +- neps_examples/__init__.py | 7 +- .../async_evaluation/run_pipeline.py | 0 .../async_evaluation/submit.py | 0 .../basic_usage/architecture_search.py | 85 +++ neps_examples/basic_usage/hyperparameters.py | 7 +- .../basic_usage/pytorch_nn_example.py | 2 - .../{analyse.py => run_analysis.py} | 5 +- neps_examples/convenience/config_creation.py | 4 +- tests/test_examples.py | 3 +- tests/test_neps_space/test_config_string.py | 287 ---------- .../test_operation_formatter.py | 493 ++++++++++++++++++ .../test_search_space__grammar_like.py | 40 +- .../test_search_space__hnas_like.py | 94 +--- .../test_search_space__nos_like.py | 4 +- .../test_search_space__reuse_arch_elements.py | 153 +++--- 21 files changed, 1045 insertions(+), 994 deletions(-) create mode 100644 neps/space/neps_spaces/operation_formatter.py rename neps_examples/{convenience => }/async_evaluation/run_pipeline.py (100%) rename neps_examples/{convenience => }/async_evaluation/submit.py (100%) create mode 100644 neps_examples/basic_usage/architecture_search.py rename neps_examples/basic_usage/{analyse.py => run_analysis.py} (82%) delete mode 100644 tests/test_neps_space/test_config_string.py create mode 100644 tests/test_neps_space/test_operation_formatter.py diff --git a/.gitignore b/.gitignore index 69ba4ec92..410f99bc5 100644 --- a/.gitignore +++ b/.gitignore @@ -41,6 +41,7 @@ jahs_bench_data/ # Jupyter .ipynb_checkpoints/ +*.ipynb # MacOS *.DS_Store diff --git a/neps/space/neps_spaces/config_string.py b/neps/space/neps_spaces/config_string.py index a31e05c7a..9f5bfe583 100644 --- a/neps/space/neps_spaces/config_string.py +++ b/neps/space/neps_spaces/config_string.py @@ -1,416 +1,60 @@ -"""This module provides functionality to unwrap and wrap 
configuration strings -used in NePS spaces. It defines the `UnwrappedConfigStringPart` data class -to represent parts of the unwrapped configuration string and provides -functions to unwrap a configuration string into these parts and to wrap -unwrapped parts back into a configuration string. +"""This module provides functionality to format configuration strings +used in NePS spaces for display purposes. """ from __future__ import annotations -import dataclasses -import functools -from collections.abc import Callable -from typing import Any +from typing import TYPE_CHECKING, Any +from neps.space.neps_spaces.operation_formatter import operation_to_string -@dataclasses.dataclass(frozen=True) -class UnwrappedConfigStringPart: - """A data class representing a part of an unwrapped configuration string. - - Args: - level: The hierarchy level of this part in the configuration string. - opening_index: The index of the opening parenthesis in the original string. - operator: The operator of this part, which is the first word in the - parenthesis. - hyperparameters: The hyperparameters of this part, if any, enclosed in curly - braces. - operands: The operands of this part, which are the remaining content in the - parenthesis. - """ - - level: int - opening_index: int - operator: str | Callable[..., Any] - hyperparameters: str - operands: str - - -@functools.lru_cache(maxsize=2000) -def unwrap_config_string(config_string: str) -> tuple[UnwrappedConfigStringPart, ...]: - """For a given config string, gets the parenthetic contents of it - and uses them to construct objects of type `UnwrappedConfigStringPart`. - First unwraps a given parenthesised config_string into parts. - Then it converts these parts into objects with structured information. - - Args: - config_string: The configuration string to be unwrapped. - - Returns: - A tuple of `UnwrappedConfigStringPart` objects representing the unwrapped - configuration string. 
- """ - # Handle simple strings with no parentheses - if "(" not in config_string: - # This is a simple operator name with no nested structure - # Create a single unwrapped part for it - item = UnwrappedConfigStringPart( - level=1, - opening_index=0, - operator=config_string.strip(), - hyperparameters="{}", - operands="", - ) - return (item,) - - # A workaround needed since in the existing configurations - # generated by previous methods, e.g. the `resBlock resBlock` and `resBlock` items - # occur without wrapping parenthesis, differently from other items. - # Wrap them appropriately in parentheses here and in the inverse process. - # For example 'id' comes in two forms: 'id id' and 'Ops id', - # only the 'id id' variant should be replaced. - replacements = [ - ("resBlock", True), - ("id", False), - ] - for op, replace_individual in replacements: - config_string = config_string.replace(f"{op} {op}", "__TMP_PLACEHOLDER___") - if replace_individual: - config_string = config_string.replace(f"{op}", f"({op})") - config_string = config_string.replace("__TMP_PLACEHOLDER___", f"({op} {op})") - - result = [] - - stack = [] - opening_counter = 0 - for current_char_index, current_char in enumerate(config_string): - if current_char == "(": - stack.append((current_char_index, opening_counter)) - opening_counter += 1 - elif current_char == ")": - assert stack, f"Found ')' with no matching '('. 
Index: {current_char_index}" - - start_char_index, opening_index = stack.pop() - level = len(stack) + 1 # start level counting from 1 and not 0 - - # Extract the content between the matching parentheses - content_inside_parens = config_string[ - start_char_index + 1 : current_char_index - ] - - # Find the operator name by looking backwards from the opening parenthesis - # to find where the operator name starts (either start of string, or after a - # comma/space) - operator_start = start_char_index - 1 - while operator_start >= 0 and config_string[operator_start] not in ",(": - operator_start -= 1 - operator_start += 1 # Move forward to the first character of the operator - - operator = config_string[operator_start:start_char_index] - operands = content_inside_parens - - # Handle hyperparameters enclosed in curly braces - if " {" in operator: - operator, hyperparameters = operator.split(" {", maxsplit=1) - hyperparameters = "{" + hyperparameters - else: - hyperparameters = "{}" - - item = UnwrappedConfigStringPart( - level=level, - opening_index=opening_index, - operator=operator, - hyperparameters=hyperparameters, - operands=operands, - ) - result.append(item) - - assert not stack, f"For '(' found no matching ')': Index: {stack[0][0]}" - return tuple(sorted(result, key=lambda x: x.opening_index)) - - -# Current profiling shows this function does not run that often -# so no need for caching -def wrap_config_into_string( # noqa: C901, PLR0915 - unwrapped_config: tuple[UnwrappedConfigStringPart, ...], - max_level: int | None = None, -) -> str: - """For a given unwrapped config, returns the string representing it. - - Args: - unwrapped_config: The unwrapped config - max_level: An optional int telling which is the maximal considered level. - Bigger levels are ignored. - - Returns: - The string representation of the unwrapped config. 
- """ - if not unwrapped_config: - return "" - - # Build a tree structure from the unwrapped parts - # Group children by their parent's (level, opening_index) - children_by_parent: dict[tuple[int, int], list[UnwrappedConfigStringPart]] = {} - for item in unwrapped_config: - if max_level is not None and item.level > max_level: - continue - parent_key = ( - (item.level - 1, item.opening_index - 1) if item.level > 1 else (0, -1) - ) - if parent_key not in children_by_parent: - children_by_parent[parent_key] = [] - children_by_parent[parent_key].append(item) - - def reconstruct(item: UnwrappedConfigStringPart) -> str: # noqa: C901, PLR0912 - """Recursively reconstruct the config string for an item.""" - operator_name = ( - item.operator.__name__ if callable(item.operator) else item.operator - ) - - # Check if we have hyperparameters (not empty and not just "{}") - has_hyperparameters = ( - item.hyperparameters - and item.hyperparameters.strip() - and item.hyperparameters != "{}" - ) - - # Get children of this item - item_key = (item.level, item.opening_index) - children = children_by_parent.get(item_key, []) - - # Reconstruct operands - if not item.operands and not children: - # No operands at all - just return the operator name - # (or operator with hyperparameters if they exist) - if has_hyperparameters: - return f"{operator_name}({item.hyperparameters})" - return operator_name - if not children: - # Leaf node - wrap the operands (and hyperparameters if present) - if has_hyperparameters: - return f"{operator_name}({item.hyperparameters}, {item.operands})" - return f"{operator_name}({item.operands})" - # Has children - need to mix nested and non-nested operands - # Parse operands to separate nested from non-nested - parts = [] - if item.operands: - current_part = [] # type: ignore[var-annotated] - paren_depth = 0 - for char in item.operands: - if char == "(": - paren_depth += 1 - elif char == ")": - paren_depth -= 1 - elif char == "," and paren_depth == 0: - 
parts.append("".join(current_part).strip()) - current_part = [] - continue - current_part.append(char) - if current_part: - parts.append("".join(current_part).strip()) - - # Reconstruct each part - nested ones recursively, non-nested as-is - reconstructed_parts = [] - child_idx = 0 - for part in parts: - if "(" in part: - # This is a nested structure - should have a corresponding child - if child_idx < len(children): - reconstructed_parts.append(reconstruct(children[child_idx])) - child_idx += 1 - else: - # Shouldn't happen, but fallback - reconstructed_parts.append(part) - else: - # Non-nested operand - just add it - reconstructed_parts.append(part) - - # Add any remaining children that weren't referenced in operands - while child_idx < len(children): - reconstructed_parts.append(reconstruct(children[child_idx])) - child_idx += 1 - - # Build result with hyperparameters if present - if has_hyperparameters: - return ( - f"{operator_name}({item.hyperparameters}," - f" {', '.join(reconstructed_parts)})" - ) - return f"{operator_name}({', '.join(reconstructed_parts)})" - - # Start reconstruction from the root (level 1 items) - root_items = [item for item in unwrapped_config if item.level == 1] - if not root_items: - return "" - - # If there's only one root, reconstruct it - if len(root_items) == 1: - result = reconstruct(root_items[0]) - else: - # Multiple roots - join with commas - result = ", ".join(reconstruct(item) for item in root_items) - - # A workaround needed since in the existing configurations - # generated by previous methods, e.g. the `resBlock resBlock` and `resBlock` items - # occur without wrapping parenthesis, differently from other items. - # Wrap them appropriately in parentheses here and in the inverse process. - # For example 'id' comes in two forms: 'id id' and 'Ops id', - # only the 'id id' variant should be replaced. 
- replacements = [ - ("resBlock", True), - ("id", False), - ] - for op, replace_individual in replacements: - result = result.replace(f"({op} {op})", "__TMP_PLACEHOLDER___") - if replace_individual: - result = result.replace(f"({op})", f"{op}") - result = result.replace("__TMP_PLACEHOLDER___", f"{op} {op}") - return result +if TYPE_CHECKING: + from neps.space.neps_spaces.parameters import Operation class ConfigString: """A class representing a configuration string in NePS spaces. - It provides methods to unwrap the configuration string into structured parts, - retrieve the maximum hierarchy level, and get a representation of the configuration - at a specific hierarchy level. + + This class provides pretty-formatted output for displaying Operation objects + to users. It uses the new operation_formatter module internally. """ - def __init__(self, config_string: str) -> None: - """Initialize the ConfigString with a given configuration string. + def __init__(self, config: str | Operation | Any) -> None: + """Initialize the ConfigString with a configuration. Args: - config_string: The configuration string to be wrapped. - - Raises: - ValueError: If the config_string is None or empty. - """ - if config_string is None or len(config_string) == 0: - raise ValueError(f"Invalid config string: {config_string}") - self.config_string = config_string - - # The fields below are needed for lazy and cached evaluation. - # In python 3.8+ can be replaced by `cached_property` - self._unwrapped: tuple[UnwrappedConfigStringPart, ...] | None = None - self._max_hierarchy_level: int | None = None - - # a cache for the different hierarchy levels of this config string - self._at_hierarchy_level_cache: dict[int, ConfigString] = {} - - @property - def unwrapped(self) -> tuple[UnwrappedConfigStringPart, ...]: - """Get the unwrapped representation of the configuration string. - - Returns: - A tuple of UnwrappedConfigStringPart objects representing the unwrapped - config. 
- - Raises: - ValueError: If there is an error unwrapping the config string. - """ - # If the unwrapped is already cached, return it - if self._unwrapped is not None: - return self._unwrapped - - unwrapped = unwrap_config_string(self.config_string) - if not unwrapped: - raise ValueError(f"Error unwrapping config string: {self.config_string}") - - # NOTE: Previously, here was a test that compared wrap_config_into_string - # (unwrapped_config=unwrapped) to unwrapped. As it frequently failed and was - # deemed to be unnecessary, it was removed - - self._unwrapped = unwrapped - return self._unwrapped - - @property - def max_hierarchy_level(self) -> int: - """Get the maximum hierarchy level of the configuration string. - - Returns: - The maximum hierarchy level of the configuration string. + config: Either a string (for backward compatibility) or an Operation object Raises: - ValueError: If the maximum hierarchy level is invalid. + ValueError: If the config is None or empty. """ - if self._max_hierarchy_level is not None: - return self._max_hierarchy_level - - max_hierarchy_level = max(i.level for i in self.unwrapped) - assert max_hierarchy_level > 0, ( - f"Invalid max hierarchy level: {self.max_hierarchy_level}" - ) + if config is None or (isinstance(config, str) and len(config) == 0): + raise ValueError(f"Invalid config: {config}") - self._max_hierarchy_level = max_hierarchy_level - return self._max_hierarchy_level + self.config = config - def at_hierarchy_level(self, level: int) -> ConfigString: - """Get the configuration string at a specific hierarchy level. - - Args: - level: The hierarchy level to retrieve the configuration string for. + def pretty_format(self) -> str: + """Get a pretty formatted string representation of the configuration. Returns: - A ConfigString object representing the configuration at the specified - hierarchy level. - - Raises: - ValueError: If the level is invalid (0 or out of bounds). 
+ A Pythonic multi-line string representation with proper indentation. """ - if level == 0: - raise ValueError(f"Invalid value for `level`. Received level == 0: {level}") - if level > self.max_hierarchy_level: - raise ValueError( - "Invalid value for `level`. " - + f"level>max_hierarchy_level: {level}>{self.max_hierarchy_level}" - ) - if level < -self.max_hierarchy_level: - raise ValueError( - "Invalid value for `level`. " - + f"level<-max_hierarchy_level: {level}<-{self.max_hierarchy_level}" - ) - - if level < 0: - # for example for level=-1, when max_hierarchy_level=7, new level is 7 - # for example for level=-3, when max_hierarchy_level=7, new level is 5 - level = self.max_hierarchy_level + (level + 1) - - if level in self._at_hierarchy_level_cache: - return self._at_hierarchy_level_cache[level] - - config_string_at_hierarchy_level = wrap_config_into_string( - unwrapped_config=self.unwrapped, max_level=level - ) - config_at_hierarchy_level = ConfigString(config_string_at_hierarchy_level) - self._at_hierarchy_level_cache[level] = config_at_hierarchy_level + from neps.space.neps_spaces.parameters import Operation - return self._at_hierarchy_level_cache[level] + if isinstance(self.config, Operation): + # Use the new formatter for Operation objects + return operation_to_string(self.config) - def pretty_format(self) -> str: - """Get a pretty formatted string representation of the configuration string. - - Returns: - A string representation of the configuration string with indentation - based on the hierarchy level of each part. 
- """ - format_str_with_kwargs = ( - "{indent}{item.level:0>2d} :: {item.operator} {item.hyperparameters}" - ) - format_str_no_kwargs = "{indent}{item.level:0>2d} :: {item.operator}" - lines = [self.config_string] - for item in self.unwrapped: - if item.hyperparameters not in {"{}", ""}: - line = format_str_with_kwargs.format(item=item, indent="\t" * item.level) - else: - line = format_str_no_kwargs.format(item=item, indent="\t" * item.level) - lines.append(line) - return "\n".join(lines) + # For string config (backward compatibility), just return as-is + return str(self.config) def __eq__(self, other: object) -> bool: if isinstance(other, self.__class__): - return self.config_string == other.config_string + return str(self.config) == str(other.config) raise NotImplementedError() # let the other side check for equality def __ne__(self, other: object) -> bool: return not self.__eq__(other) def __hash__(self) -> int: - return self.config_string.__hash__() + return str(self.config).__hash__() diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 445598701..748c1d84a 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -13,7 +13,6 @@ import neps from neps.optimizers import algorithms, optimizer -from neps.space.neps_spaces import config_string from neps.space.neps_spaces.parameters import ( _UNSET, Categorical, @@ -930,95 +929,6 @@ def convert_operation_to_callable(operation: Operation) -> Callable: return cast(Callable, operator(*operation_args, **operation_kwargs)) -def _serialize_operation(operation: Operation | str | Callable) -> str: - """Serialize an operation to its string representation. - - This is a helper function to convert Operation objects to strings - for inclusion in the operands field of UnwrappedConfigStringPart. - """ - if isinstance(operation, str): - return operation - - # Handle non-Operation objects (e.g., resolved PyTorch modules, integers, etc.) 
- if not isinstance(operation, Operation): - return str(operation) - - # For Operation objects, build the string representation - operator_name = ( - operation.operator - if isinstance(operation.operator, str) - else operation.operator.__name__ - ) - - if not operation.args: - # No operands - just return operator name - return operator_name - - # Recursively serialize operands - operand_strs = [_serialize_operation(arg) for arg in operation.args] - return f"{operator_name}({', '.join(operand_strs)})" - - -def _operation_to_unwrapped_config( - operation: Operation | str, - level: int = 1, - opening_index: int = 0, -) -> tuple[list[config_string.UnwrappedConfigStringPart], int]: - """Convert an Operation to unwrapped config parts. - - Returns: - A tuple of (list of parts, next available opening_index) - """ - result = [] - - if isinstance(operation, Operation): - operator = operation.operator - kwargs = str(operation.kwargs) - - # Build operands string and collect child parts - operand_strs = [] - all_child_parts = [] - next_opening = opening_index + 1 - - for operand in operation.args: - if isinstance(operand, Operation): - # Only create child parts if the operation has operands - # (otherwise it's just a simple name like "ReLU") - if operand.args: - # Recursively get unwrapped parts for the nested operation - child_parts, next_opening = _operation_to_unwrapped_config( - operand, level + 1, next_opening - ) - all_child_parts.extend(child_parts) - # Serialize this operand to a string for the operands field - operand_strs.append(_serialize_operation(operand)) - else: - operand_strs.append(str(operand)) - - # Create operands string - operands_str = ", ".join(operand_strs) - - item = config_string.UnwrappedConfigStringPart( - level=level, - opening_index=opening_index, - operator=operator, - hyperparameters=kwargs, - operands=operands_str, - ) - result.append(item) - result.extend(all_child_parts) - - return result, next_opening - item = 
config_string.UnwrappedConfigStringPart( - level=level, - opening_index=opening_index, - operator=operation, - hyperparameters="", - operands="", - ) - return [item], opening_index + 1 - - def convert_operation_to_string(operation: Operation | str | int | float) -> str: """Convert an Operation to a string representation. @@ -1031,12 +941,13 @@ def convert_operation_to_string(operation: Operation | str | int | float) -> str Raises: ValueError: If the operation is not a valid Operation object. """ + from neps.space.neps_spaces.operation_formatter import operation_to_string + # Handle non-Operation values (resolved primitives) if not isinstance(operation, Operation): return str(operation) - unwrapped_config, _ = _operation_to_unwrapped_config(operation) - return config_string.wrap_config_into_string(tuple(unwrapped_config)) + return operation_to_string(operation) # ------------------------------------------------- diff --git a/neps/space/neps_spaces/operation_formatter.py b/neps/space/neps_spaces/operation_formatter.py new file mode 100644 index 000000000..f2b0ae00d --- /dev/null +++ b/neps/space/neps_spaces/operation_formatter.py @@ -0,0 +1,295 @@ +"""Pretty formatting for Operation objects. + +This module provides functionality to convert Operation objects into +human-readable formatted strings. The format is Pythonic and preserves +all information including nested operations, lists, tuples, and dicts. 
+""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from neps.space.neps_spaces.parameters import Operation + + +@dataclass +class FormatterStyle: + """Configuration for the formatting style.""" + + indent_str: str = " " # Two spaces for indentation + max_line_length: int = 80 # Try to keep lines under this length + compact_threshold: int = 40 # Use compact format if repr is shorter + show_empty_args: bool = True # Show () for operations with no args/kwargs + + +def _format_value( + value: Any, + indent: int, + style: FormatterStyle, +) -> str: + """Format a value (could be primitive, list, tuple, dict, or Operation). + + Args: + value: The value to format + indent: Current indentation level + style: Formatting style configuration + + Returns: + Formatted string representation of the value + """ + from neps.space.neps_spaces.parameters import Operation + + if isinstance(value, Operation): + # Recursively format nested operations + return _format_operation(value, indent, style) + + if isinstance(value, list | tuple): + return _format_sequence(value, indent, style) + + if isinstance(value, dict): + return _format_dict(value, indent, style) + + # For strings that look like identifiers (operation names), don't add quotes + # to match the previous formatter's behavior + if isinstance(value, str) and value.isidentifier(): + return value + + # For other primitives, use repr to get proper quoting + return repr(value) + + +def _format_sequence( + seq: list | tuple, + indent: int, + style: FormatterStyle, +) -> str: + """Format a list or tuple, using compact or expanded format as needed.""" + from neps.space.neps_spaces.parameters import Operation + + if not seq: + return "[]" if isinstance(seq, list) else "()" + + # Try compact format first + compact = repr(seq) + if len(compact) <= style.compact_threshold and "\n" not in compact: + return compact + + # Use expanded format for complex 
sequences + bracket_open = "[" if isinstance(seq, list) else "(" + bracket_close = "]" if isinstance(seq, list) else ")" + + indent_str = style.indent_str * indent + inner_indent_str = style.indent_str * (indent + 1) + + # Check if any element is an Operation (needs expansion) + has_operations = any(isinstance(item, Operation) for item in seq) + + if has_operations: + # Full expansion with each item on its own line + lines = [bracket_open] + for item in seq: + formatted = _format_value(item, indent + 1, style) + lines.append(f"{inner_indent_str}{formatted},") + lines.append(f"{indent_str}{bracket_close}") + return "\n".join(lines) + + # Simple items - try to fit multiple per line + lines = [bracket_open] + current_line: list[str] = [] + current_length = 0 + + for item in seq: + item_repr = repr(item) + item_len = len(item_repr) + 2 # +2 for ", " + + if current_line and current_length + item_len > style.max_line_length: + # Start new line + lines.append(f"{inner_indent_str}{', '.join(current_line)},") + current_line = [item_repr] + current_length = len(item_repr) + else: + current_line.append(item_repr) + current_length += item_len + + if current_line: + lines.append(f"{inner_indent_str}{', '.join(current_line)},") + + lines.append(f"{indent_str}{bracket_close}") + return "\n".join(lines) + + +def _format_dict( + d: dict, + indent: int, + style: FormatterStyle, +) -> str: + """Format a dictionary.""" + if not d: + return "{}" + + # Try compact format first + compact = repr(d) + if len(compact) <= style.compact_threshold: + return compact + + # Use expanded format + indent_str = style.indent_str * indent + inner_indent_str = style.indent_str * (indent + 1) + + lines = ["{"] + for key, value in d.items(): + formatted_value = _format_value(value, indent + 1, style) + lines.append(f"{inner_indent_str}{key!r}: {formatted_value},") + lines.append(f"{indent_str}}}") + return "\n".join(lines) + + +def _format_operation( + operation: Operation, + indent: int, + style: 
FormatterStyle, +) -> str: + """Format an Operation object. + + Args: + operation: The Operation to format + indent: Current indentation level + style: Formatting style configuration + + Returns: + Formatted string representation + """ + # Get operator name + operator_name = ( + operation.operator + if isinstance(operation.operator, str) + else operation.operator.__name__ + ) + + # Check if we have any args or kwargs + has_args = operation.args and len(operation.args) > 0 + has_kwargs = operation.kwargs and len(operation.kwargs) > 0 + + if not has_args and not has_kwargs: + # Empty operation + return f"{operator_name}()" if style.show_empty_args else operator_name + + # Always use multi-line format for consistency and readability + # Build the multi-line formatted string + indent_str = style.indent_str * indent + inner_indent_str = style.indent_str * (indent + 1) + + lines = [f"{operator_name}("] + + # Format args + if has_args: + for arg in operation.args: + formatted = _format_value(arg, indent + 1, style) + lines.append(f"{inner_indent_str}{formatted},") + + # Format kwargs + if has_kwargs: + for key, value in operation.kwargs.items(): + formatted_value = _format_value(value, indent + 1, style) + lines.append(f"{inner_indent_str}{key}={formatted_value},") + + lines.append(f"{indent_str})") + + return "\n".join(lines) + + +def operation_to_string( + operation: Operation | Any, + style: FormatterStyle | None = None, +) -> str: + """Convert an Operation to a pretty-formatted string. + + This function produces a Pythonic representation of the Operation + that preserves all information and is easy to read. + + Args: + operation: The Operation to format (or any value) + style: Formatting style configuration (uses default if None) + + Returns: + Pretty-formatted string representation + + Example: + >>> op = Operation( + ... operator=nn.Sequential, + ... args=( + ... Operation(nn.Conv2d, kwargs={'in_channels': 3, 'kernel_size': + ... [3, 3]}), + ... 
Operation(nn.ReLU), + ... ), + ... ) + >>> print(operation_to_string(op)) + Sequential( + Conv2d( + in_channels=3, + kernel_size=[3, 3], + ), + ReLU, + ) + """ + from neps.space.neps_spaces.parameters import Operation + + if style is None: + style = FormatterStyle() + + if not isinstance(operation, Operation): + # Not an operation - just format the value + return _format_value(operation, 0, style) + + return _format_operation(operation, 0, style) + + +class ConfigString: + """A class representing a configuration string in NePS spaces. + + This class provides pretty-formatted output for displaying Operation objects + to users. It's a lightweight wrapper around operation_to_string for backward + compatibility. + """ + + def __init__(self, config: str | Operation | Any) -> None: + """Initialize the ConfigString with a configuration. + + Args: + config: Either a string (for backward compatibility) or an Operation object + + Raises: + ValueError: If the config is None or empty. + """ + if config is None or (isinstance(config, str) and len(config) == 0): + raise ValueError(f"Invalid config: {config}") + + self.config = config + + def pretty_format(self) -> str: + """Get a pretty formatted string representation of the configuration. + + Returns: + A Pythonic multi-line string representation with proper indentation. 
+ """ + from neps.space.neps_spaces.parameters import Operation + + if isinstance(self.config, Operation): + # Use the formatter for Operation objects + return operation_to_string(self.config) + + # For string config (backward compatibility), just return as-is + return str(self.config) + + def __eq__(self, other: object) -> bool: + if isinstance(other, self.__class__): + return str(self.config) == str(other.config) + raise NotImplementedError() # let the other side check for equality + + def __ne__(self, other: object) -> bool: + return not self.__eq__(other) + + def __hash__(self) -> int: + return str(self.config).__hash__() diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 918f1a1ea..2442bb9d8 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -748,7 +748,11 @@ def __str__(self) -> str: str_choices = [ ( choice.__name__ # type: ignore[union-attr] - if (callable(choice) and not isinstance(choice, Resolvable)) + if ( + callable(choice) + and not isinstance(choice, Resolvable) + and hasattr(choice, "__name__") + ) else str(choice) ) for choice in self.choices # type: ignore[union-attr] diff --git a/neps/status/status.py b/neps/status/status.py index eaf17583e..7f6616246 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -11,22 +11,21 @@ from collections.abc import Sequence from dataclasses import asdict, dataclass, field from pathlib import Path +from pprint import pformat from typing import TYPE_CHECKING import numpy as np import pandas as pd from neps.space.neps_spaces import neps_space -from neps.space.neps_spaces.neps_space import NepsCompatConverter +from neps.space.neps_spaces.neps_space import NepsCompatConverter, PipelineSpace +from neps.space.neps_spaces.operation_formatter import ConfigString from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler from neps.state.neps_state import FileLocker, NePSState from neps.state.trial import State, 
Trial if TYPE_CHECKING: - from neps.space.neps_spaces.parameters import PipelineSpace from neps.space.search_space import SearchSpace -else: - from neps.space.neps_spaces.parameters import PipelineSpace def _format_config_entry(entry: dict, indent: str = "") -> str: @@ -212,7 +211,13 @@ def formatted( # noqa: PLR0912, C901 f"\n objective_to_minimize: {best_objective_to_minimize}\n config: " ) if not pipeline_space: - best_summary += f"{best_trial.config}" + # Pretty-print dict configs with proper indentation + config_str = pformat( + best_trial.config, indent=2, width=80, sort_dicts=False + ) + # Add indentation to each line for alignment + indented_config = "\n ".join(config_str.split("\n")) + best_summary += f"\n {indented_config}" elif isinstance(pipeline_space, PipelineSpace): # Only PipelineSpace supports pretty formatting - SearchSpace doesn't best_config_resolve = NepsCompatConverter().from_neps_config( @@ -222,23 +227,15 @@ def formatted( # noqa: PLR0912, C901 variables = list(pipeline_space.get_attrs().keys()) + list( pipeline_space.fidelity_attrs.keys() ) + resolved_pipeline = neps_space.resolve( + pipeline_space, + OnlyPredefinedValuesSampler(best_config_resolve.predefined_samplings), + environment_values=best_config_resolve.environment_values, + )[0] + for variable in variables: - pipeline_configs.append( - neps_space.config_string.ConfigString( - neps_space.convert_operation_to_string( - getattr( - neps_space.resolve( - pipeline_space, - OnlyPredefinedValuesSampler( - best_config_resolve.predefined_samplings - ), - environment_values=best_config_resolve.environment_values, - )[0], - variable, - ) - ) - ).pretty_format() - ) + operation = getattr(resolved_pipeline, variable) + pipeline_configs.append(ConfigString(operation).pretty_format()) for n_pipeline, pipeline_config in enumerate(pipeline_configs): if isinstance(pipeline_config, str): @@ -265,8 +262,13 @@ def formatted( # noqa: PLR0912, C901 formatted_config = pipeline_config # type: ignore 
best_summary += f"\n\t{variables[n_pipeline]}: {formatted_config}" else: - # SearchSpace or other space type - just use string representation - best_summary += f"{best_trial.config}" + # SearchSpace or other space type - pretty-print the dict + config_str = pformat( + best_trial.config, indent=2, width=80, sort_dicts=False + ) + # Add indentation to each line for alignment + indented_config = "\n ".join(config_str.split("\n")) + best_summary += f"\n {indented_config}" best_summary += f"\n path: {best_trial.metadata.location}" diff --git a/neps_examples/__init__.py b/neps_examples/__init__.py index 2bf4ccdd9..00a5f1ab9 100644 --- a/neps_examples/__init__.py +++ b/neps_examples/__init__.py @@ -1,7 +1,7 @@ all_main_examples = { # Used for printing in python -m neps_examples "basic_usage": [ - "analyse", - "architecture", + "run_analysis", + "architecture_search", "architecture_and_hyperparameters", "hyperparameters", "pytorch_nn_example", @@ -25,8 +25,9 @@ core_examples = [ # Run locally and on github actions "basic_usage/hyperparameters", # NOTE: This needs to be first for some tests to work - "basic_usage/analyse", + "basic_usage/run_analysis", "basic_usage/pytorch_nn_example", + "basic_usage/architecture_search", "experimental/expert_priors_for_architecture_and_hyperparameters", "efficiency/multi_fidelity", ] diff --git a/neps_examples/convenience/async_evaluation/run_pipeline.py b/neps_examples/async_evaluation/run_pipeline.py similarity index 100% rename from neps_examples/convenience/async_evaluation/run_pipeline.py rename to neps_examples/async_evaluation/run_pipeline.py diff --git a/neps_examples/convenience/async_evaluation/submit.py b/neps_examples/async_evaluation/submit.py similarity index 100% rename from neps_examples/convenience/async_evaluation/submit.py rename to neps_examples/async_evaluation/submit.py diff --git a/neps_examples/basic_usage/architecture_search.py b/neps_examples/basic_usage/architecture_search.py new file mode 100644 index 
000000000..4b0de871b --- /dev/null +++ b/neps_examples/basic_usage/architecture_search.py @@ -0,0 +1,85 @@ +""" +This example demonstrates the full capabilities of NePS Spaces +by defining a neural network architecture using PyTorch modules. +It showcases how to interact with the NePS Spaces API to create, +sample and evaluate a neural network pipeline. +It also demonstrates how to convert the pipeline to a callable +and how to run NePS with the defined pipeline and space. +""" + +import numpy as np +import torch +import torch.nn as nn +import neps + + +# Define the NEPS space for the neural network architecture +# It reuses the same building blocks multiple times, with different sampled parameters. +class NN_Space(neps.PipelineSpace): + + _kernel_size = neps.Integer(2, 7) + + # Regular parameter (not prefixed with _) - will be sampled and shown in results + learning_rate = neps.Float(0.0001, 0.01, log=True) + optimizer_name = neps.Categorical(["adam", "sgd", "rmsprop"]) + + _conv = neps.Operation( + operator=nn.Conv2d, + kwargs={ + "in_channels": 3, + "out_channels": 3, + "kernel_size": neps.Resampled(_kernel_size), + "padding": "same", + }, + ) + + _nonlinearity = neps.Categorical( + choices=( + nn.ReLU(), + nn.Sigmoid(), + nn.Tanh(), + ) + ) + + _cell = neps.Operation( + operator=nn.Sequential, + args=( + neps.Resampled(_conv), + neps.Resampled(_nonlinearity), + ), + ) + + model = neps.Operation( + operator=nn.Sequential, + args=( + neps.Resampled(_cell), + neps.Resampled(_cell), + neps.Resampled(_cell), + ), + ) + + +# Defining the pipeline, using the model from the NN_space space as callable +def evaluate_pipeline(model: nn.Sequential, learning_rate: float, optimizer_name: str): + x = torch.ones(size=[1, 3, 220, 220]) + result = np.sum(model(x).detach().numpy().flatten()) + # Use learning_rate and optimizer_name in a simple way to show they're being passed + optimizer_multiplier = {"adam": 1.0, "sgd": 1.1, "rmsprop": 0.9}.get( + optimizer_name, 1.0 + ) + 
return result * learning_rate * optimizer_multiplier + + +# Run NePS with the defined pipeline and space and show the best configuration +pipeline_space = NN_Space() +neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=pipeline_space, + root_directory="results/architecture_search_example", + evaluations_to_spend=5, + overwrite_root_directory=True, +) +neps.status( + "results/architecture_search_example", + print_summary=True, +) diff --git a/neps_examples/basic_usage/hyperparameters.py b/neps_examples/basic_usage/hyperparameters.py index cbd41f129..a22fa4bc3 100644 --- a/neps_examples/basic_usage/hyperparameters.py +++ b/neps_examples/basic_usage/hyperparameters.py @@ -15,7 +15,7 @@ def evaluate_pipeline(float1, float2, categorical, integer1, integer2): np.sum([float1, float2, int(categorical), integer1, integer2]) ) return {"objective_to_minimize": objective_to_minimize, "cost": categorical,} - + class HPOSpace(neps.PipelineSpace): @@ -31,7 +31,6 @@ class HPOSpace(neps.PipelineSpace): evaluate_pipeline=evaluate_pipeline, pipeline_space=HPOSpace(), root_directory="results/hyperparameters_example", - evaluations_to_spend=15, - cost_to_spend=2, - worker_id=f"worker_1-{socket.gethostname()}-{os.getpid()}", + evaluations_to_spend=5, + overwrite_root_directory=True, ) diff --git a/neps_examples/basic_usage/pytorch_nn_example.py b/neps_examples/basic_usage/pytorch_nn_example.py index 574994529..586ac69b4 100644 --- a/neps_examples/basic_usage/pytorch_nn_example.py +++ b/neps_examples/basic_usage/pytorch_nn_example.py @@ -17,8 +17,6 @@ Categorical, Resampled, ) -from neps.space.neps_spaces import neps_space - # Define the neural network architecture using PyTorch as usual class ReLUConvBN(nn.Module): diff --git a/neps_examples/basic_usage/analyse.py b/neps_examples/basic_usage/run_analysis.py similarity index 82% rename from neps_examples/basic_usage/analyse.py rename to neps_examples/basic_usage/run_analysis.py index 70f4d765a..315ceba3a 100644 --- 
a/neps_examples/basic_usage/analyse.py +++ b/neps_examples/basic_usage/run_analysis.py @@ -1,4 +1,4 @@ -"""How to generate a summary (neps.status) and visualizations (neps.plot) of a run. +"""How to generate a summary (neps.status) of a run. Before running this example analysis, run the hyperparameters example with: @@ -15,6 +15,5 @@ full, summary = neps.status("results/hyperparameters_example", print_summary=True) config_id = "1" -print(full.head()) -print("") +print("\n", full.head(), "\n") print(full.loc[config_id]) diff --git a/neps_examples/convenience/config_creation.py b/neps_examples/convenience/config_creation.py index 97d69deef..aebc41484 100644 --- a/neps_examples/convenience/config_creation.py +++ b/neps_examples/convenience/config_creation.py @@ -38,8 +38,8 @@ class ExampleSpace(neps.PipelineSpace): # The created configuration can then be used as an imported trial in NePS optimizers. # We demonstrate this with the fictional result of objective_to_minimize = 0.5 neps.import_trials( - ExampleSpace(), - [(config, neps.UserResultDict(objective_to_minimize=0.5))], + evaluated_trials=[(config, neps.UserResultDict(objective_to_minimize=0.5))], root_directory="results/created_config_example", + pipeline_space=ExampleSpace(), overwrite_root_directory=True, ) diff --git a/tests/test_examples.py b/tests/test_examples.py index 1b3e0949a..cf322c0e3 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -37,12 +37,11 @@ def no_logs_gte_error(caplog): @pytest.mark.core_examples @pytest.mark.parametrize("example", core_examples_scripts, ids=core_examples) def test_core_examples(example): - if example.name == "analyse.py": + if example.name == "run_analysis.py": # Run hyperparameters example to have something to analyse runpy.run_path(str(core_examples_scripts[0]), run_name="__main__") if example.name in ( - "architecture.py", "architecture_and_hyperparameters.py", "hierarchical_architecture.py", "expert_priors_for_architecture_and_hyperparameters.py", 
diff --git a/tests/test_neps_space/test_config_string.py b/tests/test_neps_space/test_config_string.py deleted file mode 100644 index 5c3bf5f1a..000000000 --- a/tests/test_neps_space/test_config_string.py +++ /dev/null @@ -1,287 +0,0 @@ -"""Tests for config_string module functions.""" - -from __future__ import annotations - -import pytest - -from neps.space.neps_spaces.config_string import ( - ConfigString, - UnwrappedConfigStringPart, - unwrap_config_string, - wrap_config_into_string, -) - - -class TestUnwrapAndWrapConfigString: - """Test the unwrap_config_string and wrap_config_into_string functions. - - The new implementation preserves the structure during round-trip unwrap->wrap. - """ - - def test_single_nested_operation(self): - """Test unwrapping and wrapping a single nested operation.""" - config_str = "Sequential(ReLU)" - unwrapped = unwrap_config_string(config_str) - wrapped = wrap_config_into_string(unwrapped) - # Round trip should preserve the input - assert wrapped == config_str - - def test_operation_with_multiple_args(self): - """Test unwrapping and wrapping an operation with multiple arguments.""" - # New format uses commas: Sequential(ReLU, Conv2D, BatchNorm) - config_str = "Sequential(ReLU, Conv2D, BatchNorm)" - unwrapped = unwrap_config_string(config_str) - wrapped = wrap_config_into_string(unwrapped) - assert wrapped == config_str - - def test_nested_operations(self): - """Test unwrapping and wrapping nested operations.""" - # New format: Sequential(Sequential(ReLU), Conv2D) - config_str = "Sequential(Sequential(ReLU), Conv2D)" - unwrapped = unwrap_config_string(config_str) - wrapped = wrap_config_into_string(unwrapped) - assert wrapped == config_str - - def test_deeply_nested_operations(self): - """Test unwrapping and wrapping deeply nested operations.""" - # New format: Sequential(Sequential(Sequential(ReLU))) - config_str = "Sequential(Sequential(Sequential(ReLU)))" - unwrapped = unwrap_config_string(config_str) - wrapped = 
wrap_config_into_string(unwrapped) - assert wrapped == config_str - - def test_complex_nested_structure(self): - """Test unwrapping and wrapping a complex nested structure.""" - # New format with multiple levels and operands - config_str = ( - "Sequential(Sequential(ReLU, Conv2D), BatchNorm, Sequential(Dropout))" - ) - unwrapped = unwrap_config_string(config_str) - wrapped = wrap_config_into_string(unwrapped) - assert wrapped == config_str - - def test_round_trip_preservation(self): - """Test that unwrap->wrap round trip preserves the input.""" - test_cases = [ - "Sequential(ReLU)", - "Sequential(ReLU, Conv2D)", - "Sequential(Sequential(ReLU), Conv2D)", - "Sequential(Sequential(Sequential(ReLUConvBN)))", - ] - for config_str in test_cases: - unwrapped = unwrap_config_string(config_str) - wrapped = wrap_config_into_string(unwrapped) - assert wrapped == config_str, f"Round trip failed for: {config_str}" - - def test_operation_with_hyperparameters(self): - """Test that operations with hyperparameters can be unwrapped.""" - # Hyperparameters should be the first element inside the parentheses - config_str = "Conv2D({kernel_size: 3}, input)" - unwrapped = unwrap_config_string(config_str) - wrapped = wrap_config_into_string(unwrapped) - # Round trip should preserve the structure - assert wrapped == config_str - - def test_nested_operation_with_hyperparameters(self): - """Test unwrapping and wrapping nested operations with hyperparameters.""" - config_str = "Sequential(Conv2D({kernel_size: 3}, input), ReLU)" - unwrapped = unwrap_config_string(config_str) - wrapped = wrap_config_into_string(unwrapped) - # Should preserve the structure - assert wrapped == config_str - - def test_resblock_special_case(self): - """Test the special case handling of 'resBlock resBlock'.""" - # The special handling for resBlock - config_str = "resBlock resBlock" - unwrapped = unwrap_config_string(config_str) - wrapped = wrap_config_into_string(unwrapped) - assert wrapped == config_str - - def 
test_unwrapped_structure(self): - """Test the structure of unwrapped config parts.""" - config_str = "Sequential(ReLU, Conv2D)" - unwrapped = unwrap_config_string(config_str) - - # Should have 1 part: Sequential at level 1 with operands "ReLU, Conv2D" - assert len(unwrapped) == 1 - - # First part is Sequential - assert unwrapped[0].operator == "Sequential" - assert unwrapped[0].level == 1 - assert unwrapped[0].opening_index == 0 - assert unwrapped[0].operands == "ReLU, Conv2D" - - def test_grammar_like_example(self): - """Test a realistic example from grammar-like search spaces.""" - # From the actual test expectations - config_str = ( - "Sequential(Sequential(Sequential(ReLUConvBN)), Sequential(ReLUConvBN)," - " Identity)" - ) - unwrapped = unwrap_config_string(config_str) - wrapped = wrap_config_into_string(unwrapped) - assert wrapped == config_str - - -class TestConfigStringClass: - """Test the ConfigString class.""" - - def test_initialization(self): - """Test ConfigString initialization.""" - config_str = "Sequential(ReLU, Conv2D)" - cs = ConfigString(config_str) - assert cs.config_string == config_str - - def test_invalid_initialization(self): - """Test that ConfigString raises error for invalid input.""" - with pytest.raises(ValueError): - ConfigString("") - with pytest.raises((ValueError, TypeError)): - ConfigString(None) # type: ignore[arg-type] - - def test_unwrapped_property(self): - """Test the unwrapped property.""" - config_str = "Sequential(ReLU, Conv2D)" - cs = ConfigString(config_str) - unwrapped = cs.unwrapped - assert isinstance(unwrapped, tuple) - assert all(isinstance(part, UnwrappedConfigStringPart) for part in unwrapped) - - def test_max_hierarchy_level(self): - """Test max_hierarchy_level property.""" - config_str = "Sequential(Sequential(Sequential(ReLU)))" - cs = ConfigString(config_str) - # Sequential at level 1, Sequential at level 2, Sequential at level 3 - # ReLU doesn't have parentheses so it doesn't create its own unwrapped part - 
assert cs.max_hierarchy_level == 3 - - def test_at_hierarchy_level(self): - """Test at_hierarchy_level method.""" - config_str = "Sequential(Sequential(ReLU, Conv2D), BatchNorm)" - cs = ConfigString(config_str) - - # Get config at level 1 (just the outermost Sequential) - level_1 = cs.at_hierarchy_level(1) - assert "Sequential" in level_1.config_string - - # Get config at level 2 - level_2 = cs.at_hierarchy_level(2) - assert isinstance(level_2, ConfigString) - - def test_equality(self): - """Test ConfigString equality.""" - cs1 = ConfigString("Sequential(ReLU)") - cs2 = ConfigString("Sequential(ReLU)") - cs3 = ConfigString("Sequential(Conv2D)") - - assert cs1 == cs2 - assert cs1 != cs3 - assert cs2 != cs3 - - def test_hash(self): - """Test ConfigString hashing.""" - cs1 = ConfigString("Sequential(ReLU)") - cs2 = ConfigString("Sequential(ReLU)") - - # Same config strings should have same hash - assert hash(cs1) == hash(cs2) - - # Should be usable in sets - config_set = {cs1, cs2} - assert len(config_set) == 1 - - -class TestOperationSerialization: - """Test serialization of Operation objects with callables.""" - - def test_operation_with_args_and_kwargs(self): - """Test converting an Operation with args and kwargs to string.""" - from neps.space.neps_spaces.neps_space import convert_operation_to_string - from neps.space.neps_spaces.parameters import Operation - - # Create an operation with args and kwargs (like Conv2D) - operation = Operation( - "Conv2D", - args=(64,), - kwargs={"kernel_size": 3, "stride": 1}, - ) - - result = convert_operation_to_string(operation) - # Should serialize to Conv2D(64) - assert "Conv2D" in result - assert "64" in result - - def test_nested_operations_with_multiple_args(self): - """Test converting nested operations with multiple args.""" - from neps.space.neps_spaces.neps_space import convert_operation_to_string - from neps.space.neps_spaces.parameters import Operation - - # Create nested operations like Sequential(ReLU, Conv2D(64)) - 
conv_op = Operation("Conv2D", args=(64,), kwargs={"kernel_size": 3}) - relu_op = Operation("ReLU", args=(), kwargs={}) - sequential_op = Operation("Sequential", args=(relu_op, conv_op), kwargs={}) - - result = convert_operation_to_string(sequential_op) - # Should contain all operators - assert "Sequential" in result - assert "ReLU" in result - assert "Conv2D" in result - assert "64" in result - - def test_operation_with_callable_operator(self): - """Test converting an Operation with a callable operator.""" - from neps.space.neps_spaces.neps_space import convert_operation_to_string - from neps.space.neps_spaces.parameters import Operation - - # Define a simple callable - def my_layer(in_features, out_features): - return f"MyLayer({in_features}, {out_features})" - - # Create operation with callable - operation = Operation(my_layer, args=(128, 64), kwargs={}) - - result = convert_operation_to_string(operation) - # Should use the callable's name - assert "my_layer" in result - - def test_operation_serialization_with_mixed_args(self): - """Test operation with mix of simple args and operations.""" - from neps.space.neps_spaces.neps_space import convert_operation_to_string - from neps.space.neps_spaces.parameters import Operation - - # Create nested operation with mixed types - inner = Operation("ReLU", args=(), kwargs={}) - outer = Operation("Sequential", args=(inner, "BatchNorm"), kwargs={}) - - result = convert_operation_to_string(outer) - # Should serialize both operations and simple strings - assert "Sequential" in result - assert "ReLU" in result - assert "BatchNorm" in result - - def test_round_trip_with_operations(self): - """Test that operations can round-trip through unwrap/wrap.""" - from neps.space.neps_spaces.neps_space import convert_operation_to_string - from neps.space.neps_spaces.parameters import Operation - - # Create a complex nested structure - conv1 = Operation("Conv2D", args=(32,), kwargs={"kernel_size": 3}) - relu = Operation("ReLU", args=(), 
kwargs={}) - conv2 = Operation("Conv2D", args=(64,), kwargs={"kernel_size": 3}) - sequential = Operation("Sequential", args=(conv1, relu, conv2), kwargs={}) - - # Convert to string - config_str = convert_operation_to_string(sequential) - - # Verify it's parseable (unwrap should work) - unwrapped = unwrap_config_string(config_str) - assert len(unwrapped) > 0 - - # Verify it can be wrapped back - rewrapped = wrap_config_into_string(tuple(unwrapped)) - assert rewrapped == config_str - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/tests/test_neps_space/test_operation_formatter.py b/tests/test_neps_space/test_operation_formatter.py new file mode 100644 index 000000000..7d6169cc6 --- /dev/null +++ b/tests/test_neps_space/test_operation_formatter.py @@ -0,0 +1,493 @@ +"""Comprehensive tests for operation_formatter module.""" + +from __future__ import annotations + +import neps +from neps.space.neps_spaces.operation_formatter import ( + FormatterStyle, + operation_to_string, +) +from neps.space.neps_spaces.parameters import Operation + + +def test_simple_operation_no_args(): + """Test formatting an operation with no arguments - default shows ().""" + op = Operation(operator="ReLU") + result = operation_to_string(op) + assert result == "ReLU()" + + +def test_simple_operation_no_args_with_parens(): + """Test formatting with show_empty_args=False to hide ().""" + op = Operation(operator="ReLU") + style = FormatterStyle(show_empty_args=False) + result = operation_to_string(op, style) + assert result == "ReLU" + + +def test_operation_with_args_only(): + """Test formatting an operation with positional args only - always expanded.""" + op = Operation(operator="Add", args=(1, 2, 3)) + result = operation_to_string(op) + expected = """Add( + 1, + 2, + 3, +)""" + assert result == expected + + +def test_operation_with_kwargs_only(): + """Test formatting an operation with keyword args only - always expanded.""" + op = Operation(operator="Conv2d", 
kwargs={"in_channels": 3, "out_channels": 64}) + result = operation_to_string(op) + expected = """Conv2d( + in_channels=3, + out_channels=64, +)""" + assert result == expected + + +def test_operation_with_args_and_kwargs(): + """Test formatting with both positional and keyword arguments - always expanded.""" + op = Operation( + operator="LinearLayer", + args=(128,), + kwargs={"activation": "relu", "dropout": 0.5}, + ) + result = operation_to_string(op) + expected = """LinearLayer( + 128, + activation=relu, + dropout=0.5, +)""" + assert result == expected + + +def test_nested_operations(): + """Test formatting nested operations.""" + inner = Operation(operator="ReLU") + outer = Operation(operator="Sequential", args=(inner,)) + result = operation_to_string(outer) + expected = """Sequential( + ReLU(), +)""" + assert result == expected + + +def test_deeply_nested_operations(): + """Test formatting deeply nested operations - all ops expanded.""" + conv = Operation( + operator="Conv2d", + kwargs={"in_channels": 3, "out_channels": 64, "kernel_size": 3}, + ) + relu = Operation(operator="ReLU") + pool = Operation(operator="MaxPool2d", kwargs={"kernel_size": 2}) + + sequential = Operation(operator="Sequential", args=(conv, relu, pool)) + + result = operation_to_string(sequential) + expected = """Sequential( + Conv2d( + in_channels=3, + out_channels=64, + kernel_size=3, + ), + ReLU(), + MaxPool2d( + kernel_size=2, + ), +)""" + assert result == expected + + +def test_list_as_arg(): + """Test formatting with a list as an argument.""" + op = Operation(operator="Conv2d", kwargs={"kernel_size": [3, 3]}) + result = operation_to_string(op) + expected = """Conv2d( + kernel_size=[3, 3], +)""" + assert result == expected + + +def test_long_list_as_arg(): + """Test formatting with a longer list that spans multiple lines.""" + long_list = list(range(20)) + op = Operation(operator="SomeOp", kwargs={"values": long_list}) + result = operation_to_string(op) + + # Should have the list 
expanded + assert "values=[" in result + assert "0, 1, 2" in result # Multiple items per line + assert "]" in result + + +def test_tuple_as_arg(): + """Test formatting with a tuple as an argument.""" + op = Operation(operator="Shape", args=((64, 64, 3),)) + result = operation_to_string(op) + expected = """Shape( + (64, 64, 3), +)""" + assert result == expected + + +def test_dict_as_kwarg(): + """Test formatting with a dict as a keyword argument value.""" + op = Operation( + operator="ConfigOp", + kwargs={"config": {"learning_rate": 0.001, "batch_size": 32}}, + ) + result = operation_to_string(op) + # Dict gets expanded due to length + expected = """ConfigOp( + config={ + 'learning_rate': 0.001, + 'batch_size': 32, + }, +)""" + assert result == expected + + +def test_operations_in_list(): + """Test formatting operations inside a list argument - all ops expanded.""" + op1 = Operation(operator="Conv2d", kwargs={"channels": 32}) + op2 = Operation(operator="Conv2d", kwargs={"channels": 64}) + + container = Operation(operator="ModuleList", args=([op1, op2],)) + + result = operation_to_string(container) + expected = """ModuleList( + [ + Conv2d( + channels=32, + ), + Conv2d( + channels=64, + ), + ], +)""" + assert result == expected + + +def test_operations_in_list_as_kwarg(): + """Test formatting operations inside a list that is a kwarg value.""" + op1 = Operation(operator="ReLU") + op2 = Operation(operator="Sigmoid") + + container = Operation(operator="Container", kwargs={"layers": [op1, op2]}) + + result = operation_to_string(container) + expected = """Container( + layers=[ + ReLU(), + Sigmoid(), + ], +)""" + assert result == expected + + +def test_mixed_types_in_list(): + """Test formatting a list with mixed types including operations.""" + op = Operation(operator="ReLU") + mixed_list = [1, "hello", 3.14, op, [1, 2, 3]] + + container = Operation(operator="MixedContainer", args=(mixed_list,)) + + result = operation_to_string(container) + + # Check that all elements are 
present + assert "1," in result + assert "hello," in result # Identifiers don't get quotes + assert "3.14," in result + assert "ReLU()," in result + assert "[1, 2, 3]," in result + + +def test_string_values_with_quotes(): + """Test that string values are properly quoted.""" + op = Operation( + operator="TextOp", + kwargs={ + "text": "hello world", + "quote_test": "it's a test", + "double_quotes": 'say "hello"', + }, + ) + result = operation_to_string(op) + + # Check strings are properly represented + assert "text='hello world'" in result or 'text="hello world"' in result + assert "quote_test" in result + assert "double_quotes" in result + + +def test_complex_nested_structure(): + """Test a complex nested structure with all types.""" + # Build a complex structure + + conv = Operation( + operator="Conv2d", + kwargs={"in_channels": 3, "out_channels": 64, "kernel_size": [3, 3]}, + ) + relu = Operation(operator="ReLU") + + seq = Operation( + operator="Sequential", + args=([conv, relu],), + kwargs={"dropout": 0.5, "config": {"layers": [3, 64, 128]}}, + ) + + result = operation_to_string(seq) + + # Verify structure + assert "Sequential(" in result + assert "Conv2d(" in result + assert "in_channels=3" in result + assert "kernel_size=[3, 3]" in result + assert "ReLU()," in result + assert "dropout=0.5" in result + assert "config=" in result + assert "'layers': [3, 64, 128]" in result + + +def test_non_operation_value(): + """Test formatting a non-Operation value.""" + # Should work with any value + result1 = operation_to_string(42) + assert result1 == "42" + + result2 = operation_to_string("hello") + assert result2 == "hello" # Identifiers don't get quotes + + result3 = operation_to_string([1, 2, 3]) + assert result3 == "[1, 2, 3]" + + +def test_custom_indent(): + """Test using a custom indentation style - all ops expanded.""" + op = Operation(operator="Conv2d", kwargs={"channels": 64}) + style = FormatterStyle(indent_str=" ") # 4 spaces + + result = operation_to_string(op, 
style) + expected = """Conv2d( + channels=64, +)""" + assert result == expected + + +def test_empty_list(): + """Test formatting with empty list.""" + op = Operation(operator="Op", kwargs={"items": []}) + result = operation_to_string(op) + expected = """Op( + items=[], +)""" + assert result == expected + + +def test_empty_tuple(): + """Test formatting with empty tuple.""" + op = Operation(operator="Op", args=((),)) + result = operation_to_string(op) + expected = """Op( + (), +)""" + assert result == expected + + +def test_empty_dict(): + """Test formatting with empty dict.""" + op = Operation(operator="Op", kwargs={"config": {}}) + result = operation_to_string(op) + expected = """Op( + config={}, +)""" + assert result == expected + + +def test_boolean_values(): + """Test formatting with boolean values - always expanded.""" + op = Operation(operator="Op", kwargs={"enabled": True, "debug": False, "count": 0}) + result = operation_to_string(op) + expected = """Op( + enabled=True, + debug=False, + count=0, +)""" + assert result == expected + + +def test_none_value(): + """Test formatting with None value - always expanded.""" + op = Operation(operator="Op", kwargs={"default": None}) + result = operation_to_string(op) + expected = """Op( + default=None, +)""" + assert result == expected + + +def test_real_world_example(): + """Test a realistic neural network architecture.""" + # Build a realistic example similar to architecture_search.py + conv1 = Operation( + operator="Conv2d", + kwargs={"in_channels": 3, "out_channels": 64, "kernel_size": [3, 3]}, + ) + relu1 = Operation(operator="ReLU") + pool1 = Operation(operator="MaxPool2d", kwargs={"kernel_size": 2, "stride": 2}) + + conv2 = Operation( + operator="Conv2d", + kwargs={"in_channels": 64, "out_channels": 128, "kernel_size": [3, 3]}, + ) + relu2 = Operation(operator="ReLU") + pool2 = Operation(operator="MaxPool2d", kwargs={"kernel_size": 2, "stride": 2}) + + flatten = Operation(operator="Flatten") + fc = Operation( + 
operator="Linear", kwargs={"in_features": 128 * 7 * 7, "out_features": 10} + ) + + model = Operation( + operator="Sequential", + args=([conv1, relu1, pool1, conv2, relu2, pool2, flatten, fc],), + ) + + result = operation_to_string(model) + + # Verify key elements are present + assert "Sequential(" in result + assert "Conv2d(" in result + assert "in_channels=3" in result + assert "out_channels=64" in result + assert "kernel_size=[3, 3]" in result + assert "ReLU()," in result + assert "MaxPool2d(" in result + assert "Flatten()," in result + assert "Linear(" in result + assert "in_features=" in result + assert "out_features=10" in result + + +def test_categorical_with_operations(): + """Test formatting when a Categorical contains Operations - always expanded.""" + + class TestSpace(neps.PipelineSpace): + choice = neps.Categorical( + [ + Operation(operator="Conv2d", kwargs={"in_channels": 3, "kernel_size": 3}), + Operation(operator="ReLU"), + ] + ) + + # Sample and resolve + space = TestSpace() + resolved, _ = neps.space.neps_spaces.neps_space.resolve(space) + + # The resolved choice should be an Operation + assert isinstance(resolved.choice, Operation) + + # Should format properly - check for essential content + result = operation_to_string(resolved.choice) + # Either Conv2d with both params, or ReLU + is_conv = ( + "Conv2d" in result and "in_channels=3" in result and "kernel_size=3" in result + ) + is_relu = result == "ReLU()" + assert is_conv or is_relu + + +def test_categorical_with_primitives(): + """Test formatting when a Categorical contains primitives.""" + + class TestSpace(neps.PipelineSpace): + choice = neps.Categorical(["adam", "sgd", "rmsprop"]) + + space = TestSpace() + resolved, _ = neps.space.neps_spaces.neps_space.resolve(space) + + # The resolved choice should be a string + assert isinstance(resolved.choice, str) + + # Should format as a simple string (identifiers don't get quotes) + result = operation_to_string(resolved.choice) + assert result in 
["adam", "sgd", "rmsprop"] + + +def test_categorical_with_mixed_types(): + """Test formatting when a Categorical contains mixed types.""" + + class TestSpace(neps.PipelineSpace): + choice = neps.Categorical( + [ + Operation(operator="Linear", kwargs={"in_features": 10}), + "simple_string", + 42, + ] + ) + + space = TestSpace() + resolved, _ = neps.space.neps_spaces.neps_space.resolve(space) + + # Should format appropriately based on what was chosen + result = operation_to_string(resolved.choice) + + # Check it's one of the expected formats (identifiers don't get quotes) + possible_results = [ + "Linear(\n in_features=10,\n)", # Expanded format + "Linear(in_features=10)", # Compact format (simple operation) + "simple_string", # Identifiers don't get quotes + "42", + ] + assert result in possible_results + + +def test_resolved_float_parameter(): + """Test formatting a resolved Float parameter.""" + + class TestSpace(neps.PipelineSpace): + lr = neps.Float(0.001, 0.1) + + space = TestSpace() + resolved, _ = neps.space.neps_spaces.neps_space.resolve(space) + + # Resolved Float becomes a float value + assert isinstance(resolved.lr, float) + + # Should format as a simple number + result = operation_to_string(resolved.lr) + assert result == repr(resolved.lr) + + +def test_resolved_integer_parameter(): + """Test formatting a resolved Integer parameter.""" + + class TestSpace(neps.PipelineSpace): + batch_size = neps.Integer(16, 128) + + space = TestSpace() + resolved, _ = neps.space.neps_spaces.neps_space.resolve(space) + + # Resolved Integer becomes an int value + assert isinstance(resolved.batch_size, int) + + # Should format as a simple number + result = operation_to_string(resolved.batch_size) + assert result == repr(resolved.batch_size) + + +if __name__ == "__main__": + # Run a quick test to see output + conv = Operation( + operator="Conv2d", + kwargs={"in_channels": 3, "out_channels": 64, "kernel_size": [3, 3]}, + ) + relu = Operation(operator="ReLU") + seq = 
Operation(operator="Sequential", args=([conv, relu],), kwargs={"dropout": 0.5}) + + import pytest + + pytest.main([__file__, "-v"]) diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index bd692566f..a83557bf8 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -3,7 +3,7 @@ import pytest import neps.space.neps_spaces.sampling -from neps.space.neps_spaces import config_string, neps_space +from neps.space.neps_spaces import neps_space, operation_formatter from neps.space.neps_spaces.parameters import ( Categorical, Operation, @@ -176,7 +176,7 @@ def test_resolve(): s = resolved_pipeline.S s_config_string = neps_space.convert_operation_to_string(s) assert s_config_string - pretty_config = config_string.ConfigString(s_config_string).pretty_format() + pretty_config = operation_formatter.ConfigString(s_config_string).pretty_format() assert pretty_config @@ -192,7 +192,7 @@ def test_resolve_alt(): s = resolved_pipeline.S s_config_string = neps_space.convert_operation_to_string(s) assert s_config_string - pretty_config = config_string.ConfigString(s_config_string).pretty_format() + pretty_config = operation_formatter.ConfigString(s_config_string).pretty_format() assert pretty_config @@ -290,14 +290,6 @@ def test_resolve_context(): 1 ), } - expected_s_config_string = ( - "Sequential(Sequential(Sequential(ReLUConvBN), Sequential(Conv2D-3," - " Sequential(Sequential(Sequential(Sequential(Identity, Conv2D-3," - " Identity))), Sequential(ReLUConvBN), Conv2D-3, Identity, Conv2D-1," - " Conv2D-3, Conv2D-1, Identity), ReLUConvBN)), Sequential(Sequential" - "(Sequential(Sequential(Identity, Sequential(ReLUConvBN))))), Conv2D-1," - " Conv2D-1, Identity, Identity, Conv2D-1, Conv2D-1)" - ) pipeline = GrammarLike() @@ -321,7 +313,12 @@ def test_resolve_context(): s = resolved_pipeline.S s_config_string = 
neps_space.convert_operation_to_string(s) assert s_config_string - assert s_config_string == expected_s_config_string + # Verify the config contains expected operation names (format may be compact or multiline) + assert "Sequential" in s_config_string + assert "ReLUConvBN" in s_config_string + assert "Conv2D-3" in s_config_string + assert "Identity" in s_config_string + assert "Conv2D-1" in s_config_string def test_resolve_context_alt(): @@ -481,19 +478,6 @@ def test_resolve_context_alt(): 0 ), } - expected_s_config_string = ( - "Sequential(Sequential(Sequential(Sequential(Sequential" - "(Sequential(Conv2D-3, Sequential(ReLUConvBN))), Sequential" - "(ReLUConvBN), Identity, Conv2D-3, Conv2D-3, Conv2D-1, Conv2D-3," - " Identity))), Sequential(Conv2D-3, Sequential(Sequential" - "(Sequential(Sequential(Sequential(ReLUConvBN), Sequential" - "(Conv2D-1)), Sequential(Sequential(ReLUConvBN)), Conv2D-1," - " Conv2D-1, Identity, Conv2D-1, Conv2D-3, Conv2D-3))," - " Sequential(Sequential(ReLUConvBN), Sequential(Sequential" - "(Sequential(Identity)), Sequential(Conv2D-3)), Conv2D-1," - " Identity, Conv2D-1, Conv2D-1, Conv2D-1, Identity), Identity," - " Identity, Identity, Conv2D-1, Conv2D-1, Conv2D-3), ReLUConvBN))" - ) pipeline = GrammarLikeAlt() @@ -517,4 +501,8 @@ def test_resolve_context_alt(): s = resolved_pipeline.S s_config_string = neps_space.convert_operation_to_string(s) assert s_config_string - assert s_config_string == expected_s_config_string + # Verify the config contains expected operation names (format may be compact or multiline) + assert "Sequential" in s_config_string + assert "ReLUConvBN" in s_config_string + assert "Conv2D-1" in s_config_string + assert "Identity" in s_config_string diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index 08ac1d4c3..9c0bd5cc1 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py 
@@ -3,7 +3,7 @@ import pytest import neps.space.neps_spaces.sampling -from neps.space.neps_spaces import config_string, neps_space +from neps.space.neps_spaces import neps_space, operation_formatter from neps.space.neps_spaces.parameters import ( Categorical, Float, @@ -219,13 +219,13 @@ def test_hnas_like_string(): arch = resolved_pipeline.ARCH arch_config_string = neps_space.convert_operation_to_string(arch) assert arch_config_string - pretty_config = config_string.ConfigString(arch_config_string).pretty_format() + pretty_config = operation_formatter.ConfigString(arch_config_string).pretty_format() assert pretty_config cl = resolved_pipeline.CL cl_config_string = neps_space.convert_operation_to_string(cl) assert cl_config_string - pretty_config = config_string.ConfigString(cl_config_string).pretty_format() + pretty_config = operation_formatter.ConfigString(cl_config_string).pretty_format() assert pretty_config @@ -279,79 +279,6 @@ def test_hnas_like_context(): ), } - expected_cl_config_string = ( - "CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3," - " NORM batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK" - " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero)" - ) - expected_arch_config_string = ( - "D2 Sequential3(D0 Residual3(C Residual2(CELL Cell(OPS Sequential1" - "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," - " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," - " CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" - "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," - " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," - " CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" - "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," - " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," - " CONV dconv3x3, NORM layer)), OPS zero)), C 
Sequential2(CELL Cell(OPS" - " Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch))," - " OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT" - " relu, CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" - "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," - " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," - " CONV dconv3x3, NORM layer)), OPS zero)), CELL Cell(OPS Sequential1" - "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," - " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," - " CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" - "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," - " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," - " CONV dconv3x3, NORM layer)), OPS zero)), D1 Residual3(C Sequential2(CELL" - " Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM" - " batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK" - " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero), CELL Cell" - "(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM" - " batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK" - " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero)), C" - " Sequential2(CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV" - " dconv3x3, NORM batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1" - "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero)," - " CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3," - " NORM batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK" - " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero)), DOWN" - " Sequential2(CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV" - " dconv3x3, NORM batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1" - 
"(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero)," - " resBlock), DOWN Sequential3(CELL Cell(OPS Sequential1(CONVBLOCK Sequential3" - "(ACT relu, CONV dconv3x3, NORM batch)), OPS zero, OPS id, OPS avg_pool," - " OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM" - " layer)), OPS zero), CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT" - " relu, CONV dconv3x3, NORM batch)), OPS zero, OPS id, OPS avg_pool, OPS" - " Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM layer))," - " OPS zero), resBlock)), D1 Residual3(C Sequential2(CELL Cell(OPS Sequential1" - "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," - " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," - " CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" - "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," - " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," - " CONV dconv3x3, NORM layer)), OPS zero)), C Sequential2(CELL Cell(OPS" - " Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch))," - " OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT" - " relu, CONV dconv3x3, NORM layer)), OPS zero), CELL Cell(OPS Sequential1" - "(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch)), OPS zero," - " OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT relu," - " CONV dconv3x3, NORM layer)), OPS zero)), DOWN Sequential2(CELL Cell(OPS" - " Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM batch))," - " OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK Sequential3(ACT" - " relu, CONV dconv3x3, NORM layer)), OPS zero), resBlock), DOWN Sequential3" - "(CELL Cell(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3," - " NORM batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK" - " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero), CELL Cell" - 
"(OPS Sequential1(CONVBLOCK Sequential3(ACT relu, CONV dconv3x3, NORM" - " batch)), OPS zero, OPS id, OPS avg_pool, OPS Sequential1(CONVBLOCK" - " Sequential3(ACT relu, CONV dconv3x3, NORM layer)), OPS zero), resBlock)))" - ) - pipeline = HNASLikePipeline() resolved_pipeline, resolution_context = neps_space.resolve( @@ -374,12 +301,21 @@ def test_hnas_like_context(): cl = resolved_pipeline.CL cl_config_string = neps_space.convert_operation_to_string(cl) assert cl_config_string - assert cl_config_string == expected_cl_config_string + # The new formatter outputs operations in full rather than using sharing references + # Check for essential elements instead of exact format + assert "Cell(" in cl_config_string + assert "Sequential" in cl_config_string + assert "relu" in cl_config_string + assert "dconv3x3" in cl_config_string assert "NORM batch" in cl_config_string assert "NORM layer" in cl_config_string + assert "zero" in cl_config_string + assert "avg_pool" in cl_config_string arch = resolved_pipeline.ARCH arch_config_string = neps_space.convert_operation_to_string(arch) assert arch_config_string - assert arch_config_string == expected_arch_config_string - assert cl_config_string in arch_config_string + # Check that arch contains CL-related operations (nested structure) + assert "Cell(" in arch_config_string + assert "Residual" in arch_config_string + assert "Sequential" in arch_config_string diff --git a/tests/test_neps_space/test_search_space__nos_like.py b/tests/test_neps_space/test_search_space__nos_like.py index 4d39c9d19..49a979a37 100644 --- a/tests/test_neps_space/test_search_space__nos_like.py +++ b/tests/test_neps_space/test_search_space__nos_like.py @@ -2,7 +2,7 @@ import pytest -from neps.space.neps_spaces import config_string, neps_space +from neps.space.neps_spaces import neps_space, operation_formatter from neps.space.neps_spaces.parameters import ( Categorical, Integer, @@ -129,5 +129,5 @@ def test_resolve(): p = resolved_pipeline.P p_config_string = 
neps_space.convert_operation_to_string(p) assert p_config_string - pretty_config = config_string.ConfigString(p_config_string).pretty_format() + pretty_config = operation_formatter.ConfigString(p_config_string).pretty_format() assert pretty_config diff --git a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index ef2a8b222..443906096 100644 --- a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -154,12 +154,7 @@ def test_nested_simple(): @pytest.mark.repeat(50) def test_nested_simple_string(): - possible_cell_config_strings = { - "relu", - "prelu_with_args(0.1, 0.2)", - "prelu_with_kwargs({'init': 0.1})", - } - + # Format is now always expanded, check for content pipeline = ActPipelineSimple() resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) @@ -167,7 +162,14 @@ def test_nested_simple_string(): act = resolved_pipeline.act act_config_string = neps_space.convert_operation_to_string(act) assert act_config_string - assert act_config_string in possible_cell_config_strings + + # Check for one of the possible operations + is_relu = "relu" in act_config_string.lower() + is_prelu_args = "prelu_with_args" in act_config_string and "0.1" in act_config_string + is_prelu_kwargs = ( + "prelu_with_kwargs" in act_config_string and "init=0.1" in act_config_string + ) + assert is_relu or is_prelu_args or is_prelu_kwargs @pytest.mark.repeat(50) @@ -207,16 +209,16 @@ def test_nested_complex_string(): act_config_string = neps_space.convert_operation_to_string(act) assert act_config_string - # expected to look like: "prelu({'init': 0.1087727907176638})" - expected_prefix = "prelu({'init': " - expected_ending = "})" - assert act_config_string.startswith(expected_prefix) - assert act_config_string.endswith(expected_ending) - assert ( - 0.1 - <= float(act_config_string[len(expected_prefix) : 
-len(expected_ending)]) - <= 0.9 - ) + # Format is now expanded, check for content + assert "prelu" in act_config_string + assert "init=" in act_config_string + # Extract the init value (should be between 0.1 and 0.9) + import re + + match = re.search(r"init=([\d.]+)", act_config_string) + assert match is not None + init_value = float(match.group(1)) + assert 0.1 <= init_value <= 0.9 def test_fixed_pipeline(): @@ -243,7 +245,9 @@ def test_fixed_pipeline_string(): act = resolved_pipeline.act act_config_string = neps_space.convert_operation_to_string(act) assert act_config_string - assert act_config_string == "prelu({'init': 0.5})" + # Check content rather than exact format (now always expanded) + assert "prelu" in act_config_string + assert "init=0.5" in act_config_string @pytest.mark.repeat(50) @@ -278,17 +282,8 @@ def test_simple_reuse(): @pytest.mark.repeat(50) def test_simple_reuse_string(): - possible_conv_block_config_strings = { - "sequential3(conv1x1, conv1x1, conv1x1)", - "sequential3(conv1x1, conv3x3, conv1x1)", - "sequential3(conv3x3, conv1x1, conv3x3)", - "sequential3(conv3x3, conv3x3, conv3x3)", - "sequential3(conv5x5, conv5x5, conv5x5)", - "sequential3(conv5x5, conv9x9, conv5x5)", - "sequential3(conv9x9, conv5x5, conv9x9)", - "sequential3(conv9x9, conv9x9, conv9x9)", - } - + # Check that the formatted string reflects the reuse pattern correctly + # Format is now always expanded, so check semantic content pipeline = ConvPipeline() resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) @@ -296,7 +291,18 @@ def test_simple_reuse_string(): conv_block = resolved_pipeline.conv_block conv_block_config_string = neps_space.convert_operation_to_string(conv_block) assert conv_block_config_string - assert conv_block_config_string in possible_conv_block_config_strings + + # Should contain sequential3 and three conv operations + assert "sequential3" in conv_block_config_string + assert conv_block_config_string.count("conv") == 3 + + # Extract the 
three conv operations - they should follow the reuse pattern + # where first and third are the same + import re + + convs = re.findall(r"(conv\dx\d)", conv_block_config_string) + assert len(convs) == 3 + assert convs[0] == convs[2], f"First and third conv should match: {convs}" @pytest.mark.repeat(50) @@ -348,56 +354,29 @@ def test_shared_complex(): @pytest.mark.repeat(50) def test_shared_complex_string(): - possible_cell_config_strings = { - ( - "cell({'float_hp': 0.5, 'int_hp': 2}, avg_pool, avg_pool, avg_pool," - " avg_pool, avg_pool, avg_pool)" - ), - ( - "cell({'float_hp': 0.5, 'int_hp': 2}, zero, sequential3(relu, conv3x3," - " batch), zero, sequential3(relu, conv3x3, batch), zero, sequential3" - "(relu, conv3x3, batch))" - ), - ( - "cell({'float_hp': 0.5, 'int_hp': 2}, sequential3(relu, conv3x3, batch)," - " avg_pool, sequential3(relu, conv3x3, batch), avg_pool, sequential3" - "(relu, conv3x3, batch), avg_pool)" - ), - "cell({'float_hp': 0.5, 'int_hp': 2}, zero, zero, zero, zero, zero, zero)", - ( - "cell({'float_hp': 0.5, 'int_hp': 2}, zero, avg_pool, zero, avg_pool," - " zero, avg_pool)" - ), - ( - "cell({'float_hp': 0.5, 'int_hp': 2}, sequential3(relu, conv3x3, batch)," - " sequential3(relu, conv3x3, batch), sequential3(relu, conv3x3," - " batch), sequential3(relu, conv3x3, batch), sequential3(relu," - " conv3x3, batch), sequential3(relu, conv3x3, batch))" - ), - ( - "cell({'float_hp': 0.5, 'int_hp': 2}, avg_pool, zero, avg_pool, zero," - " avg_pool, zero)" - ), - ( - "cell({'float_hp': 0.5, 'int_hp': 2}, sequential3(relu, conv3x3, batch)," - " zero, sequential3(relu, conv3x3, batch), zero, sequential3(relu," - " conv3x3, batch), zero)" - ), - ( - "cell({'float_hp': 0.5, 'int_hp': 2}, avg_pool, sequential3(relu," - " conv3x3, batch), avg_pool, sequential3(relu, conv3x3, batch)," - " avg_pool, sequential3(relu, conv3x3, batch))" - ), - } + # The new formatter outputs all operations in full, rather than using + # references for shared operations. 
Check for key elements instead of exact format. pipeline = CellPipeline() - resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) cell = resolved_pipeline.cell cell_config_string = neps_space.convert_operation_to_string(cell) + + # Verify essential elements are present assert cell_config_string - assert cell_config_string in possible_cell_config_strings + assert cell_config_string.startswith("cell(") + assert "float_hp=0.5" in cell_config_string + assert "int_hp=2" in cell_config_string + + # Check that the operation types that could appear are present + # (at least one of avg_pool, zero, or sequential3 should appear) + has_operation = ( + "avg_pool()" in cell_config_string + or "zero()" in cell_config_string + or "sequential3(" in cell_config_string + ) + assert has_operation def test_shared_complex_context(): @@ -446,17 +425,21 @@ def test_shared_complex_context(): # the second resolution should give us a new object assert resolved_pipeline_second is not resolved_pipeline_first - expected_config_string: str = ( - "cell({'float_hp': 0.5, 'int_hp': 2}, avg_pool, zero, avg_pool, zero," - " avg_pool, zero)" + # The new formatter outputs operations in full rather than using references. + # Check that both resolutions produce the same format and contain expected operations. 
+ config_str_first = neps_space.convert_operation_to_string( + resolved_pipeline_first.cell ) - - # however, their final results should be the same thing - assert ( - neps_space.convert_operation_to_string(resolved_pipeline_first.cell) - == expected_config_string - ) - assert ( - neps_space.convert_operation_to_string(resolved_pipeline_second.cell) - == expected_config_string + config_str_second = neps_space.convert_operation_to_string( + resolved_pipeline_second.cell ) + + # Both resolutions with same samplings should produce identical output + assert config_str_first == config_str_second + + # Check essential elements are present + assert config_str_first.startswith("cell(") + assert "avg_pool()" in config_str_first + assert "zero()" in config_str_first + assert "float_hp=0.5" in config_str_first + assert "int_hp=2" in config_str_first From 18c8894b7baa13e5b72e17cc2a673fe0207ac3ca Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 27 Nov 2025 15:02:10 +0100 Subject: [PATCH 126/156] feat: Add examples for hyperparameter optimization, architecture search, and custom configuration in NePS --- neps/space/neps_spaces/operation_formatter.py | 33 ++--- neps/status/status.py | 35 ++--- ...yperparameters.py => 1_hyperparameters.py} | 15 +- .../{run_analysis.py => 2_run_analysis.py} | 3 - ...ure_search.py => 3_architecture_search.py} | 45 +++--- .../4_architecture_and_hyperparameters.py | 35 +++++ .../basic_usage/5_optimizer_search.py | 89 +++++++++++ .../basic_usage/pytorch_nn_example.py | 140 ------------------ ....py => create_and_import_custom_config.py} | 0 9 files changed, 182 insertions(+), 213 deletions(-) rename neps_examples/basic_usage/{hyperparameters.py => 1_hyperparameters.py} (75%) rename neps_examples/basic_usage/{run_analysis.py => 2_run_analysis.py} (87%) rename neps_examples/basic_usage/{architecture_search.py => 3_architecture_search.py} (63%) create mode 100644 neps_examples/basic_usage/4_architecture_and_hyperparameters.py create mode 100644 
neps_examples/basic_usage/5_optimizer_search.py delete mode 100644 neps_examples/basic_usage/pytorch_nn_example.py rename neps_examples/convenience/{config_creation.py => create_and_import_custom_config.py} (100%) diff --git a/neps/space/neps_spaces/operation_formatter.py b/neps/space/neps_spaces/operation_formatter.py index f2b0ae00d..f85c06f07 100644 --- a/neps/space/neps_spaces/operation_formatter.py +++ b/neps/space/neps_spaces/operation_formatter.py @@ -51,12 +51,15 @@ def _format_value( if isinstance(value, dict): return _format_dict(value, indent, style) - # For strings that look like identifiers (operation names), don't add quotes - # to match the previous formatter's behavior + # For callables (functions, methods), show their name instead of repr + if callable(value) and (name := getattr(value, "__name__", None)): + return name + + # For identifier strings, don't add quotes if isinstance(value, str) and value.isidentifier(): return value - # For other primitives, use repr to get proper quoting + # For other values, use repr return repr(value) @@ -77,8 +80,8 @@ def _format_sequence( return compact # Use expanded format for complex sequences - bracket_open = "[" if isinstance(seq, list) else "(" - bracket_close = "]" if isinstance(seq, list) else ")" + is_list = isinstance(seq, list) + bracket_open, bracket_close = ("[", "]") if is_list else ("(", ")") indent_str = style.indent_str * indent inner_indent_str = style.indent_str * (indent + 1) @@ -169,11 +172,10 @@ def _format_operation( ) # Check if we have any args or kwargs - has_args = operation.args and len(operation.args) > 0 - has_kwargs = operation.kwargs and len(operation.kwargs) > 0 + has_args = bool(operation.args) + has_kwargs = bool(operation.kwargs) - if not has_args and not has_kwargs: - # Empty operation + if not (has_args or has_kwargs): return f"{operator_name}()" if style.show_empty_args else operator_name # Always use multi-line format for consistency and readability @@ -263,7 +265,7 @@ def 
__init__(self, config: str | Operation | Any) -> None: Raises: ValueError: If the config is None or empty. """ - if config is None or (isinstance(config, str) and len(config) == 0): + if config is None or (isinstance(config, str) and not config): raise ValueError(f"Invalid config: {config}") self.config = config @@ -284,12 +286,9 @@ def pretty_format(self) -> str: return str(self.config) def __eq__(self, other: object) -> bool: - if isinstance(other, self.__class__): - return str(self.config) == str(other.config) - raise NotImplementedError() # let the other side check for equality - - def __ne__(self, other: object) -> bool: - return not self.__eq__(other) + if not isinstance(other, self.__class__): + return NotImplemented + return str(self.config) == str(other.config) def __hash__(self) -> int: - return str(self.config).__hash__() + return hash(str(self.config)) diff --git a/neps/status/status.py b/neps/status/status.py index 7f6616246..2843d711c 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -176,7 +176,7 @@ def num_pending(self) -> int: """Number of trials that are pending.""" return len(self.by_state[State.PENDING]) - def formatted( # noqa: PLR0912, C901 + def formatted( # noqa: PLR0912 self, pipeline_space: PipelineSpace | SearchSpace | None = None ) -> str: """Return a formatted string of the summary. 
@@ -238,29 +238,20 @@ def formatted( # noqa: PLR0912, C901 pipeline_configs.append(ConfigString(operation).pretty_format()) for n_pipeline, pipeline_config in enumerate(pipeline_configs): - if isinstance(pipeline_config, str): - # Replace literal \t and \n with actual formatting - formatted_config = pipeline_config.replace("\\t", " ").replace( - "\\n", "\n" + formatted_config = str(pipeline_config) + variable_name = variables[n_pipeline] + + # Multi-line configs: put on new line with proper indentation + # Single-line configs: inline after variable name + if "\n" in formatted_config: + indented_config = "\n ".join( + formatted_config.split("\n") + ) + best_summary += ( + f"\n {variable_name}:\n {indented_config}" ) - - # Add proper indentation to each line - lines = formatted_config.split("\n") - indented_lines = [] - for i, line in enumerate(lines): - if i == 0: - indented_lines.append( - line - ) # First line gets base indentation - else: - indented_lines.append( - " " + line - ) # Subsequent lines get extra indentation - - formatted_config = "\n".join(indented_lines) else: - formatted_config = pipeline_config # type: ignore - best_summary += f"\n\t{variables[n_pipeline]}: {formatted_config}" + best_summary += f"\n {variable_name}: {formatted_config}" else: # SearchSpace or other space type - pretty-print the dict config_str = pformat( diff --git a/neps_examples/basic_usage/hyperparameters.py b/neps_examples/basic_usage/1_hyperparameters.py similarity index 75% rename from neps_examples/basic_usage/hyperparameters.py rename to neps_examples/basic_usage/1_hyperparameters.py index a22fa4bc3..871863e6d 100644 --- a/neps_examples/basic_usage/hyperparameters.py +++ b/neps_examples/basic_usage/1_hyperparameters.py @@ -1,14 +1,13 @@ +""" +This example demonstrates how to use NePS to optimize hyperparameters +of a pipeline. The pipeline is a simple function that takes in +five hyperparameters and returns their sum. 
+Neps uses the default optimizer to minimize this objective function. +""" + import logging import numpy as np import neps -import socket -import os - -# This example demonstrates how to use NePS to optimize hyperparameters -# of a pipeline. The pipeline is a simple function that takes in -# five hyperparameters and returns their sum. -# Neps uses the default optimizer to minimize this objective function. - def evaluate_pipeline(float1, float2, categorical, integer1, integer2): objective_to_minimize = -float( diff --git a/neps_examples/basic_usage/run_analysis.py b/neps_examples/basic_usage/2_run_analysis.py similarity index 87% rename from neps_examples/basic_usage/run_analysis.py rename to neps_examples/basic_usage/2_run_analysis.py index 315ceba3a..b93f90012 100644 --- a/neps_examples/basic_usage/run_analysis.py +++ b/neps_examples/basic_usage/2_run_analysis.py @@ -1,7 +1,5 @@ """How to generate a summary (neps.status) of a run. - Before running this example analysis, run the hyperparameters example with: - python -m neps_examples.basic_usage.hyperparameters """ @@ -11,7 +9,6 @@ # read-able and can be useful # 2. Printing a summary and reading in results. 
-# Alternatively use `python -m neps.status results/hyperparameters_example` full, summary = neps.status("results/hyperparameters_example", print_summary=True) config_id = "1" diff --git a/neps_examples/basic_usage/architecture_search.py b/neps_examples/basic_usage/3_architecture_search.py similarity index 63% rename from neps_examples/basic_usage/architecture_search.py rename to neps_examples/basic_usage/3_architecture_search.py index 4b0de871b..75702dbbd 100644 --- a/neps_examples/basic_usage/architecture_search.py +++ b/neps_examples/basic_usage/3_architecture_search.py @@ -19,10 +19,8 @@ class NN_Space(neps.PipelineSpace): _kernel_size = neps.Integer(2, 7) - # Regular parameter (not prefixed with _) - will be sampled and shown in results - learning_rate = neps.Float(0.0001, 0.01, log=True) - optimizer_name = neps.Categorical(["adam", "sgd", "rmsprop"]) - + # Building blocks of the neural network architecture + # The convolution layer with sampled kernel size _conv = neps.Operation( operator=nn.Conv2d, kwargs={ @@ -33,6 +31,7 @@ class NN_Space(neps.PipelineSpace): }, ) + # Non-linearity layer sampled from a set of choices _nonlinearity = neps.Categorical( choices=( nn.ReLU(), @@ -41,6 +40,7 @@ class NN_Space(neps.PipelineSpace): ) ) + # A cell consisting of a convolution followed by a non-linearity _cell = neps.Operation( operator=nn.Sequential, args=( @@ -49,6 +49,7 @@ class NN_Space(neps.PipelineSpace): ), ) + # The full model consisting of three cells stacked sequentially model = neps.Operation( operator=nn.Sequential, args=( @@ -60,26 +61,24 @@ class NN_Space(neps.PipelineSpace): # Defining the pipeline, using the model from the NN_space space as callable -def evaluate_pipeline(model: nn.Sequential, learning_rate: float, optimizer_name: str): +def evaluate_pipeline(model: torch.nn.Module) -> float: x = torch.ones(size=[1, 3, 220, 220]) result = np.sum(model(x).detach().numpy().flatten()) - # Use learning_rate and optimizer_name in a simple way to show 
they're being passed - optimizer_multiplier = {"adam": 1.0, "sgd": 1.1, "rmsprop": 0.9}.get( - optimizer_name, 1.0 - ) - return result * learning_rate * optimizer_multiplier + + return result -# Run NePS with the defined pipeline and space and show the best configuration -pipeline_space = NN_Space() -neps.run( - evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, - root_directory="results/architecture_search_example", - evaluations_to_spend=5, - overwrite_root_directory=True, -) -neps.status( - "results/architecture_search_example", - print_summary=True, -) +if __name__ == "__main__": + # Run NePS with the defined pipeline and space and show the best configuration + pipeline_space = NN_Space() + neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=pipeline_space, + root_directory="results/architecture_search_example", + evaluations_to_spend=5, + overwrite_root_directory=True, + ) + neps.status( + "results/architecture_search_example", + print_summary=True, + ) diff --git a/neps_examples/basic_usage/4_architecture_and_hyperparameters.py b/neps_examples/basic_usage/4_architecture_and_hyperparameters.py new file mode 100644 index 000000000..87827c384 --- /dev/null +++ b/neps_examples/basic_usage/4_architecture_and_hyperparameters.py @@ -0,0 +1,35 @@ +""" +This example demonstrates how to combine neural network architecture +search with hyperparameter optimization using NePS. 
+""" + +import neps +import torch +import numpy as np +from architecture_search import NN_Space + +# Extend the architecture search space with a hyperparameter +extended_space = NN_Space().add(neps.Integer(16, 128), name="batch_size") + +def evaluate_pipeline(model: torch.nn.Module, batch_size: int) -> float: + # For demonstration, we return a dummy objective value + # In practice, you would train and evaluate the model here + x = torch.ones(size=[1, 3, 220, 220]) + result = np.sum(model(x).detach().numpy().flatten()) + + objective_value = batch_size * result # Dummy computation + return objective_value + + +if __name__ == "__main__": + neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=extended_space, + root_directory="results/architecture_with_hp_example", + evaluations_to_spend=5, + overwrite_root_directory=True, + ) + neps.status( + root_directory="results/architecture_with_hp_example", + print_summary=True, + ) diff --git a/neps_examples/basic_usage/5_optimizer_search.py b/neps_examples/basic_usage/5_optimizer_search.py new file mode 100644 index 000000000..2b5ea5eb1 --- /dev/null +++ b/neps_examples/basic_usage/5_optimizer_search.py @@ -0,0 +1,89 @@ +""" +This example demonstrates how to use NePS to search for an optimizer +""" + +import neps +import torch + + +def optimizer_constructor(*functions, gradient_clipping: float, learning_rate: float): + # Build a simple optimizer that applies a sequence of functions to the gradients + class CustomOptimizer(torch.optim.Optimizer): + def __init__(self, params): + defaults = dict( + gradient_clipping=gradient_clipping, learning_rate=learning_rate + ) + super().__init__(params, defaults) + + def step(self, _closure=None): + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad.data + for func in functions: + grad = func(grad) + # Apply gradient clipping + grad = torch.clamp( + grad, -group["gradient_clipping"], group["gradient_clipping"] + ) + # 
Update parameters + p.data.add_(grad, alpha=-group["learning_rate"]) + + return CustomOptimizer + + +class OptimizerSpace(neps.PipelineSpace): + + _gradient_clipping = neps.Float(0.5, 1.0) + _learning_rate = neps.Float(0.0001, 0.01, log=True) + + _functions = neps.Categorical( + choices=(torch.sqrt, torch.log, torch.exp, torch.sign, torch.abs) + ) + + # The optimizer class constructed with sampled hyperparameters + # and functions + optimizer_class = neps.Operation( + operator=optimizer_constructor, + args=( + neps.Resampled(_functions), + neps.Resampled(_functions), + neps.Resampled(_functions), + ), + kwargs={ + "learning_rate": neps.Resampled(_learning_rate), + "gradient_clipping": neps.Resampled(_gradient_clipping), + }, + ) + + +# In the pipeline, we optimize a simple quadratic function using the sampled optimizer +def evaluate_pipeline(optimizer_class) -> float: + x = torch.ones(size=[1], requires_grad=True) + optimizer = optimizer_class([x]) + + # Optimize for a few steps + for _ in range(10): + optimizer.zero_grad() + y = x**2 + 2 * x + 1 + y.backward() + optimizer.step() + + return y.item() + + +# Run NePS with the defined pipeline and space and show the best configuration +if __name__ == "__main__": + pipeline_space = OptimizerSpace() + neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=pipeline_space, + root_directory="results/optimizer_search_example", + evaluations_to_spend=5, + overwrite_root_directory=True, + ) + neps.status( + root_directory="results/optimizer_search_example", + print_summary=True, + ) diff --git a/neps_examples/basic_usage/pytorch_nn_example.py b/neps_examples/basic_usage/pytorch_nn_example.py deleted file mode 100644 index 586ac69b4..000000000 --- a/neps_examples/basic_usage/pytorch_nn_example.py +++ /dev/null @@ -1,140 +0,0 @@ -""" -This example demonstrates the full capabilities of NePS Spaces -by defining a neural network architecture using PyTorch modules. 
-It showcases how to interact with the NePS Spaces API to create, -sample and evaluate a neural network pipeline. -It also demonstrates how to convert the pipeline to a callable -and how to run NePS with the defined pipeline and space. -""" - -import numpy as np -import torch -import torch.nn as nn -import neps -from neps import ( - PipelineSpace, - Operation, - Categorical, - Resampled, -) - -# Define the neural network architecture using PyTorch as usual -class ReLUConvBN(nn.Module): - def __init__(self, out_channels, kernel_size, stride, padding): - super().__init__() - - self.kernel_size = kernel_size - self.op = nn.Sequential( - nn.ReLU(inplace=False), - nn.LazyConv2d( - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=2, - bias=False, - ), - nn.LazyBatchNorm2d(affine=True, track_running_stats=True), - ) - - def forward(self, x): - return self.op(x) - - -class Identity(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - return x - - -# Define the NEPS space for the neural network architecture -class NN_Space(PipelineSpace): - _id = Operation(operator=Identity) - _three = Operation( - operator=nn.Conv2d, - kwargs={ - "in_channels": 3, - "out_channels": 3, - "kernel_size": 3, - "stride": 1, - "padding": 1, - }, - ) - _one = Operation( - operator=nn.Conv2d, - kwargs={ - "in_channels": 3, - "out_channels": 3, - "kernel_size": 1, - "stride": 1, - "padding": 0, - }, - ) - _reluconvbn = Operation( - operator=ReLUConvBN, - kwargs={"out_channels": 3, "kernel_size": 3, "stride": 1, "padding": 1}, - ) - - _O = Categorical(choices=(_three, _one, _id)) - - _C_ARGS = Categorical( - choices=( - (Resampled(_O),), - (Resampled(_O), Resampled("model"), _reluconvbn), - (Resampled(_O), Resampled("model")), - (Resampled("model"),), - ), - ) - _C = Operation( - operator=nn.Sequential, - args=Resampled(_C_ARGS), - ) - - _model_ARGS = Categorical( - choices=( - (Resampled(_C),), - (_reluconvbn,), - 
(Resampled("model"),), - (Resampled("model"), Resampled(_C)), - (Resampled(_O), Resampled(_O), Resampled(_O)), - ( - Resampled("model"), - Resampled("model"), - Resampled(_O), - Resampled(_O), - Resampled(_O), - Resampled(_O), - Resampled(_O), - Resampled(_O), - ), - ), - ) - model = Operation( - operator=nn.Sequential, - args=Resampled(_model_ARGS), - ) - - -# Defining the pipeline, using the model from the NN_space space as callable -def evaluate_pipeline(model: nn.Sequential): - x = torch.ones(size=[1, 3, 220, 220]) - result = np.sum(model(x).detach().numpy().flatten()) - return result - - -# Run NePS with the defined pipeline and space and show the best configuration -pipeline_space = NN_Space() -neps.run( - evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, - optimizer=neps.algorithms.neps_random_search, - root_directory="results/neps_spaces_nn_example", - evaluations_to_spend=5, - overwrite_root_directory=True, -) -neps.status( - "results/neps_spaces_nn_example", - print_summary=True, -) diff --git a/neps_examples/convenience/config_creation.py b/neps_examples/convenience/create_and_import_custom_config.py similarity index 100% rename from neps_examples/convenience/config_creation.py rename to neps_examples/convenience/create_and_import_custom_config.py From 5c583f1c9373b63e0485f191a186b75168be4e8d Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 27 Nov 2025 16:52:38 +0100 Subject: [PATCH 127/156] docs: Improve example description and comments in optimizer search example --- neps_examples/basic_usage/5_optimizer_search.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/neps_examples/basic_usage/5_optimizer_search.py b/neps_examples/basic_usage/5_optimizer_search.py index 2b5ea5eb1..f179c634b 100644 --- a/neps_examples/basic_usage/5_optimizer_search.py +++ b/neps_examples/basic_usage/5_optimizer_search.py @@ -1,11 +1,13 @@ """ -This example demonstrates how to use NePS to search for an optimizer +This example 
demonstrates how to use NePS to search for an optimizer. We define +a search space that samples different configurations of a simple custom optimizer +built using PyTorch. The pipeline optimizes a simple quadratic function using the +sampled optimizer. NePS is then run to find the best optimizer configuration. """ import neps import torch - def optimizer_constructor(*functions, gradient_clipping: float, learning_rate: float): # Build a simple optimizer that applies a sequence of functions to the gradients class CustomOptimizer(torch.optim.Optimizer): @@ -33,8 +35,9 @@ def step(self, _closure=None): return CustomOptimizer +# The search space defines the optimizer class constructed with sampled hyperparameters +# and functions class OptimizerSpace(neps.PipelineSpace): - _gradient_clipping = neps.Float(0.5, 1.0) _learning_rate = neps.Float(0.0001, 0.01, log=True) @@ -42,8 +45,7 @@ class OptimizerSpace(neps.PipelineSpace): choices=(torch.sqrt, torch.log, torch.exp, torch.sign, torch.abs) ) - # The optimizer class constructed with sampled hyperparameters - # and functions + optimizer_class = neps.Operation( operator=optimizer_constructor, args=( From 7898219cff7cad964840867b453dfff4e23556f3 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 27 Nov 2025 16:55:48 +0100 Subject: [PATCH 128/156] feat: Rename examples for hyperparameter optimization, architecture search, and custom optimizer in NePS --- .../{1_hyperparameters.py => ex1_hyperparameters.py} | 0 .../basic_usage/{2_run_analysis.py => ex2_run_analysis.py} | 0 .../{3_architecture_search.py => ex3_architecture_search.py} | 0 ...perparameters.py => ex4_architecture_and_hyperparameters.py} | 2 +- .../{5_optimizer_search.py => ex5_optimizer_search.py} | 0 5 files changed, 1 insertion(+), 1 deletion(-) rename neps_examples/basic_usage/{1_hyperparameters.py => ex1_hyperparameters.py} (100%) rename neps_examples/basic_usage/{2_run_analysis.py => ex2_run_analysis.py} (100%) rename 
neps_examples/basic_usage/{3_architecture_search.py => ex3_architecture_search.py} (100%) rename neps_examples/basic_usage/{4_architecture_and_hyperparameters.py => ex4_architecture_and_hyperparameters.py} (95%) rename neps_examples/basic_usage/{5_optimizer_search.py => ex5_optimizer_search.py} (100%) diff --git a/neps_examples/basic_usage/1_hyperparameters.py b/neps_examples/basic_usage/ex1_hyperparameters.py similarity index 100% rename from neps_examples/basic_usage/1_hyperparameters.py rename to neps_examples/basic_usage/ex1_hyperparameters.py diff --git a/neps_examples/basic_usage/2_run_analysis.py b/neps_examples/basic_usage/ex2_run_analysis.py similarity index 100% rename from neps_examples/basic_usage/2_run_analysis.py rename to neps_examples/basic_usage/ex2_run_analysis.py diff --git a/neps_examples/basic_usage/3_architecture_search.py b/neps_examples/basic_usage/ex3_architecture_search.py similarity index 100% rename from neps_examples/basic_usage/3_architecture_search.py rename to neps_examples/basic_usage/ex3_architecture_search.py diff --git a/neps_examples/basic_usage/4_architecture_and_hyperparameters.py b/neps_examples/basic_usage/ex4_architecture_and_hyperparameters.py similarity index 95% rename from neps_examples/basic_usage/4_architecture_and_hyperparameters.py rename to neps_examples/basic_usage/ex4_architecture_and_hyperparameters.py index 87827c384..ecea916ea 100644 --- a/neps_examples/basic_usage/4_architecture_and_hyperparameters.py +++ b/neps_examples/basic_usage/ex4_architecture_and_hyperparameters.py @@ -6,7 +6,7 @@ import neps import torch import numpy as np -from architecture_search import NN_Space +from ex3_architecture_search import NN_Space # Extend the architecture search space with a hyperparameter extended_space = NN_Space().add(neps.Integer(16, 128), name="batch_size") diff --git a/neps_examples/basic_usage/5_optimizer_search.py b/neps_examples/basic_usage/ex5_optimizer_search.py similarity index 100% rename from 
neps_examples/basic_usage/5_optimizer_search.py rename to neps_examples/basic_usage/ex5_optimizer_search.py From 82c198e199b3028226263b23b676bb311b16ede7 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 27 Nov 2025 16:59:03 +0100 Subject: [PATCH 129/156] docs: Update links for hyperparameter optimization examples and correct optimizer reference --- docs/getting_started.md | 2 +- docs/index.md | 2 +- docs/reference/neps_run.md | 2 +- neps/runtime.py | 3 --- 4 files changed, 3 insertions(+), 6 deletions(-) diff --git a/docs/getting_started.md b/docs/getting_started.md index 749deaba7..177e58817 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -58,7 +58,7 @@ The [reference](reference/neps_run.md) section provides detailed information on Or discover the features of NePS through these practical examples: -* **[Hyperparameter Optimization (HPO)](examples/basic_usage/hyperparameters.md)**: +* **[Hyperparameter Optimization (HPO)](examples/basic_usage/ex1_hyperparameters.md)**: Learn the essentials of hyperparameter optimization with NePS. * **[Multi-Fidelity Optimization](examples/efficiency/multi_fidelity.md)**: diff --git a/docs/index.md b/docs/index.md index f0e81d21a..bc62763f1 100644 --- a/docs/index.md +++ b/docs/index.md @@ -91,7 +91,7 @@ neps.run( Discover how NePS works through these examples: -- **[Hyperparameter Optimization](examples/basic_usage/hyperparameters.md)**: Learn the essentials of hyperparameter optimization with NePS. +- **[Hyperparameter Optimization](examples/basic_usage/ex1_hyperparameters.md)**: Learn the essentials of hyperparameter optimization with NePS. - **[Multi-Fidelity Optimization](examples/efficiency/multi_fidelity.md)**: Understand how to leverage multi-fidelity optimization for efficient model tuning. 
diff --git a/docs/reference/neps_run.md b/docs/reference/neps_run.md index 73d293e1f..291e0f5bf 100644 --- a/docs/reference/neps_run.md +++ b/docs/reference/neps_run.md @@ -146,7 +146,7 @@ neps.run( For details on: - [`neps.load_pipeline_space()`][neps.api.load_pipeline_space] - see [Search Space Reference](neps_spaces.md#loading-the-search-space-from-disk) -- [`neps.load_optimizer_info()`][neps.api.load_optimizer_info] - see [Optimizer Reference](optimizers.md#loading-optimizer-information) +- [`neps.load_optimizer_info()`][neps.api.load_optimizer_info] - see [Optimizer Reference](optimizers.md#24-loading-optimizer-information) ## Overwriting a Run diff --git a/neps/runtime.py b/neps/runtime.py index 6258e10cd..bb09f90de 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -795,11 +795,8 @@ def load_incumbent_trace( Args: trials (dict): A dictionary of the trials. _trace_lock (FileLock): A file lock to ensure thread-safe writing. - state (NePSState): The current NePS state. - settings (WorkerSettings): The worker settings. improvement_trace_path (Path): Path to the improvement trace file. best_config_path (Path): Path to the best configuration file. - optimizer (AskFunction): The optimizer used for sampling configurations. 
""" if not trials: return From 33d34ff9d7070eafb447deb8c517db1dc39abbb7 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 27 Nov 2025 17:04:35 +0100 Subject: [PATCH 130/156] feat: Update example names in all_main_examples and core_examples for consistency --- neps_examples/__init__.py | 21 ++++++++++++--------- tests/test_examples.py | 30 ------------------------------ 2 files changed, 12 insertions(+), 39 deletions(-) diff --git a/neps_examples/__init__.py b/neps_examples/__init__.py index 00a5f1ab9..a4220fb4d 100644 --- a/neps_examples/__init__.py +++ b/neps_examples/__init__.py @@ -1,12 +1,14 @@ all_main_examples = { # Used for printing in python -m neps_examples "basic_usage": [ - "run_analysis", - "architecture_search", - "architecture_and_hyperparameters", - "hyperparameters", - "pytorch_nn_example", + "ex1_hyperparameters", + "ex2_run_analysis", + "ex3_architecture_search", + "ex4_architecture_and_hyperparameters", + "ex5_optimizer_search", ], "convenience": [ + "create_and_import_custom_config", + "import_trial", "logging_additional_info", "neps_tblogger_tutorial", "running_on_slurm_scripts", @@ -24,10 +26,11 @@ } core_examples = [ # Run locally and on github actions - "basic_usage/hyperparameters", # NOTE: This needs to be first for some tests to work - "basic_usage/run_analysis", - "basic_usage/pytorch_nn_example", - "basic_usage/architecture_search", + "basic_usage/ex1_hyperparameters", # NOTE: This needs to be first for some tests to work + "basic_usage/ex2_run_analysis", + "basic_usage/ex3_architecture_search", + "basic_usage/ex4_architecture_and_hyperparameters", + "basic_usage/ex5_optimizer_search", "experimental/expert_priors_for_architecture_and_hyperparameters", "efficiency/multi_fidelity", ] diff --git a/tests/test_examples.py b/tests/test_examples.py index cf322c0e3..93ed60227 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -8,8 +8,6 @@ import pytest from neps_examples import ci_examples, core_examples -from neps.exceptions 
import WorkerFailedToGetPendingTrialsError - @pytest.fixture(autouse=True) def use_tmpdir(tmp_path, request): @@ -41,34 +39,6 @@ def test_core_examples(example): # Run hyperparameters example to have something to analyse runpy.run_path(str(core_examples_scripts[0]), run_name="__main__") - if example.name in ( - "architecture_and_hyperparameters.py", - "hierarchical_architecture.py", - "expert_priors_for_architecture_and_hyperparameters.py", - ): - pytest.xfail("Architecture were removed temporarily") - - # pytorch_nn_example has a known recursion issue in resolution - if example.name == "pytorch_nn_example.py": - try: - runpy.run_path(str(example), run_name="__main__") - except (RecursionError, WorkerFailedToGetPendingTrialsError) as e: - # RecursionError occurs during resolution of nested structures - # WorkerFailedToGetPendingTrialsError occurs when RecursionError repeats - # This is a known bug that should be fixed, so we use xfail instead of skip - error_str = str(e) - cause_str = str(e.__cause__) if e.__cause__ else "" - if ( - "RecursionError" in error_str - or "maximum recursion depth" in error_str - or "maximum recursion depth" in cause_str - ): - pytest.xfail( - "Known RecursionError bug in nested structure resolution:" - f" {type(e).__name__}" - ) - # If it's a different error, fail the test - raise else: runpy.run_path(str(example), run_name="__main__") From 72539941828b9dc5af88f4f93046b635c140acce Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 27 Nov 2025 17:11:02 +0100 Subject: [PATCH 131/156] feat: Update example files for hyperparameter optimization and architecture search; rename examples for consistency --- neps_examples/__init__.py | 22 +++-- ...yperparameters.py => 1_hyperparameters.py} | 0 ...{ex2_run_analysis.py => 2_run_analysis.py} | 0 ...ure_search.py => 3_architecture_search.py} | 0 .../4_architecture_and_hyperparameters.py | 81 +++++++++++++++++++ ...imizer_search.py => 5_optimizer_search.py} | 0 
.../ex4_architecture_and_hyperparameters.py | 35 -------- 7 files changed, 91 insertions(+), 47 deletions(-) rename neps_examples/basic_usage/{ex1_hyperparameters.py => 1_hyperparameters.py} (100%) rename neps_examples/basic_usage/{ex2_run_analysis.py => 2_run_analysis.py} (100%) rename neps_examples/basic_usage/{ex3_architecture_search.py => 3_architecture_search.py} (100%) create mode 100644 neps_examples/basic_usage/4_architecture_and_hyperparameters.py rename neps_examples/basic_usage/{ex5_optimizer_search.py => 5_optimizer_search.py} (100%) delete mode 100644 neps_examples/basic_usage/ex4_architecture_and_hyperparameters.py diff --git a/neps_examples/__init__.py b/neps_examples/__init__.py index a4220fb4d..91bad03cf 100644 --- a/neps_examples/__init__.py +++ b/neps_examples/__init__.py @@ -1,10 +1,10 @@ all_main_examples = { # Used for printing in python -m neps_examples "basic_usage": [ - "ex1_hyperparameters", - "ex2_run_analysis", - "ex3_architecture_search", - "ex4_architecture_and_hyperparameters", - "ex5_optimizer_search", + "1_hyperparameters", + "2_run_analysis", + "3_architecture_search", + "4_architecture_and_hyperparameters", + "5_optimizer_search", ], "convenience": [ "create_and_import_custom_config", @@ -26,17 +26,15 @@ } core_examples = [ # Run locally and on github actions - "basic_usage/ex1_hyperparameters", # NOTE: This needs to be first for some tests to work - "basic_usage/ex2_run_analysis", - "basic_usage/ex3_architecture_search", - "basic_usage/ex4_architecture_and_hyperparameters", - "basic_usage/ex5_optimizer_search", - "experimental/expert_priors_for_architecture_and_hyperparameters", + "basic_usage/1_hyperparameters", # NOTE: This needs to be first for some tests to work + "basic_usage/2_run_analysis", + "basic_usage/3_architecture_search", + "basic_usage/4_architecture_and_hyperparameters", + "basic_usage/5_optimizer_search", "efficiency/multi_fidelity", ] ci_examples = [ # Run on github actions - 
"basic_usage/architecture_and_hyperparameters", "experimental/hierarchical_architecture", "efficiency/expert_priors_for_hyperparameters", "convenience/logging_additional_info", diff --git a/neps_examples/basic_usage/ex1_hyperparameters.py b/neps_examples/basic_usage/1_hyperparameters.py similarity index 100% rename from neps_examples/basic_usage/ex1_hyperparameters.py rename to neps_examples/basic_usage/1_hyperparameters.py diff --git a/neps_examples/basic_usage/ex2_run_analysis.py b/neps_examples/basic_usage/2_run_analysis.py similarity index 100% rename from neps_examples/basic_usage/ex2_run_analysis.py rename to neps_examples/basic_usage/2_run_analysis.py diff --git a/neps_examples/basic_usage/ex3_architecture_search.py b/neps_examples/basic_usage/3_architecture_search.py similarity index 100% rename from neps_examples/basic_usage/ex3_architecture_search.py rename to neps_examples/basic_usage/3_architecture_search.py diff --git a/neps_examples/basic_usage/4_architecture_and_hyperparameters.py b/neps_examples/basic_usage/4_architecture_and_hyperparameters.py new file mode 100644 index 000000000..43192d30b --- /dev/null +++ b/neps_examples/basic_usage/4_architecture_and_hyperparameters.py @@ -0,0 +1,81 @@ +""" +This example demonstrates how to combine neural network architecture +search with hyperparameter optimization using NePS. 
+""" + +import neps +import torch +import numpy as np +from torch import nn + +# Using the space from the architecture search example +class NN_Space(neps.PipelineSpace): + + _kernel_size = neps.Integer(2, 7) + + # Building blocks of the neural network architecture + # The convolution layer with sampled kernel size + _conv = neps.Operation( + operator=nn.Conv2d, + kwargs={ + "in_channels": 3, + "out_channels": 3, + "kernel_size": neps.Resampled(_kernel_size), + "padding": "same", + }, + ) + + # Non-linearity layer sampled from a set of choices + _nonlinearity = neps.Categorical( + choices=( + nn.ReLU(), + nn.Sigmoid(), + nn.Tanh(), + ) + ) + + # A cell consisting of a convolution followed by a non-linearity + _cell = neps.Operation( + operator=nn.Sequential, + args=( + neps.Resampled(_conv), + neps.Resampled(_nonlinearity), + ), + ) + + # The full model consisting of three cells stacked sequentially + model = neps.Operation( + operator=nn.Sequential, + args=( + neps.Resampled(_cell), + neps.Resampled(_cell), + neps.Resampled(_cell), + ), + ) + + +# Extend the architecture search space with a hyperparameter +extended_space = NN_Space().add(neps.Integer(16, 128), name="batch_size") + +def evaluate_pipeline(model: torch.nn.Module, batch_size: int) -> float: + # For demonstration, we return a dummy objective value + # In practice, you would train and evaluate the model here + x = torch.ones(size=[1, 3, 220, 220]) + result = np.sum(model(x).detach().numpy().flatten()) + + objective_value = batch_size * result # Dummy computation + return objective_value + + +if __name__ == "__main__": + neps.run( + evaluate_pipeline=evaluate_pipeline, + pipeline_space=extended_space, + root_directory="results/architecture_with_hp_example", + evaluations_to_spend=5, + overwrite_root_directory=True, + ) + neps.status( + root_directory="results/architecture_with_hp_example", + print_summary=True, + ) diff --git a/neps_examples/basic_usage/ex5_optimizer_search.py 
b/neps_examples/basic_usage/5_optimizer_search.py similarity index 100% rename from neps_examples/basic_usage/ex5_optimizer_search.py rename to neps_examples/basic_usage/5_optimizer_search.py diff --git a/neps_examples/basic_usage/ex4_architecture_and_hyperparameters.py b/neps_examples/basic_usage/ex4_architecture_and_hyperparameters.py deleted file mode 100644 index ecea916ea..000000000 --- a/neps_examples/basic_usage/ex4_architecture_and_hyperparameters.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -This example demonstrates how to combine neural network architecture -search with hyperparameter optimization using NePS. -""" - -import neps -import torch -import numpy as np -from ex3_architecture_search import NN_Space - -# Extend the architecture search space with a hyperparameter -extended_space = NN_Space().add(neps.Integer(16, 128), name="batch_size") - -def evaluate_pipeline(model: torch.nn.Module, batch_size: int) -> float: - # For demonstration, we return a dummy objective value - # In practice, you would train and evaluate the model here - x = torch.ones(size=[1, 3, 220, 220]) - result = np.sum(model(x).detach().numpy().flatten()) - - objective_value = batch_size * result # Dummy computation - return objective_value - - -if __name__ == "__main__": - neps.run( - evaluate_pipeline=evaluate_pipeline, - pipeline_space=extended_space, - root_directory="results/architecture_with_hp_example", - evaluations_to_spend=5, - overwrite_root_directory=True, - ) - neps.status( - root_directory="results/architecture_with_hp_example", - print_summary=True, - ) From 8b893177a0e0d36c1d8080b61cd914195313423b Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 27 Nov 2025 17:23:42 +0100 Subject: [PATCH 132/156] feat: Update core_examples and remove experimental example for consistency --- neps_examples/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neps_examples/__init__.py b/neps_examples/__init__.py index 91bad03cf..039f85bc3 100644 --- 
a/neps_examples/__init__.py +++ b/neps_examples/__init__.py @@ -32,11 +32,11 @@ "basic_usage/4_architecture_and_hyperparameters", "basic_usage/5_optimizer_search", "efficiency/multi_fidelity", + "efficiency/expert_priors_for_hyperparameters", + "efficiency/multi_fidelity_and_expert_priors", ] ci_examples = [ # Run on github actions - "experimental/hierarchical_architecture", - "efficiency/expert_priors_for_hyperparameters", "convenience/logging_additional_info", "convenience/working_directory_per_pipeline", "convenience/neps_tblogger_tutorial", From e62b1f6ad7198083f7dbd8884b695294fb7d34fa Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 27 Nov 2025 17:24:32 +0100 Subject: [PATCH 133/156] fix: Rename hyperparameter optimization example file for consistency --- docs/getting_started.md | 2 +- docs/index.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/getting_started.md b/docs/getting_started.md index 177e58817..91bacec44 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -58,7 +58,7 @@ The [reference](reference/neps_run.md) section provides detailed information on Or discover the features of NePS through these practical examples: -* **[Hyperparameter Optimization (HPO)](examples/basic_usage/ex1_hyperparameters.md)**: +* **[Hyperparameter Optimization (HPO)](examples/basic_usage/1_hyperparameters.md)**: Learn the essentials of hyperparameter optimization with NePS. * **[Multi-Fidelity Optimization](examples/efficiency/multi_fidelity.md)**: diff --git a/docs/index.md b/docs/index.md index bc62763f1..dffc64211 100644 --- a/docs/index.md +++ b/docs/index.md @@ -91,7 +91,7 @@ neps.run( Discover how NePS works through these examples: -- **[Hyperparameter Optimization](examples/basic_usage/ex1_hyperparameters.md)**: Learn the essentials of hyperparameter optimization with NePS. +- **[Hyperparameter Optimization](examples/basic_usage/1_hyperparameters.md)**: Learn the essentials of hyperparameter optimization with NePS. 
- **[Multi-Fidelity Optimization](examples/efficiency/multi_fidelity.md)**: Understand how to leverage multi-fidelity optimization for efficient model tuning. From 16a8b6b709cd140a4c69b757f4d5233d2d20cfd5 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 28 Nov 2025 12:19:56 +0100 Subject: [PATCH 134/156] Refactor string formatting for operations and enhance pretty-printing - Introduced a new `string_formatter` module to handle the pretty formatting of Operation objects, replacing the previous `operation_formatter`. - Updated `IOSampler` to use a new `format_choice` function for better representation of categorical choices. - Modified the `Summary` class to utilize the new `format_value` function for pipeline configurations. - Enhanced example scripts to improve readability and maintainability, including better logging and formatting. - Added comprehensive tests for the new string formatting functionality, ensuring correct representation of various operation types and structures. - Removed legacy code and references to the old formatting approach, streamlining the codebase. 
--- README.md | 23 +- docs/reference/neps_run.md | 4 +- docs/reference/neps_spaces.md | 10 +- neps/api.py | 92 +++- neps/space/neps_spaces/config_string.py | 60 --- neps/space/neps_spaces/neps_space.py | 4 +- neps/space/neps_spaces/operation_formatter.py | 294 ------------- neps/space/neps_spaces/parameters.py | 83 +--- neps/space/neps_spaces/sampling.py | 11 +- neps/space/neps_spaces/string_formatter.py | 406 ++++++++++++++++++ neps/status/status.py | 4 +- .../basic_usage/1_hyperparameters.py | 8 +- .../basic_usage/3_architecture_search.py | 34 +- .../4_architecture_and_hyperparameters.py | 39 +- .../basic_usage/5_optimizer_search.py | 29 +- .../create_and_import_custom_config.py | 2 - .../test_search_space__grammar_like.py | 6 +- .../test_search_space__hnas_like.py | 6 +- .../test_search_space__nos_like.py | 4 +- ..._formatter.py => test_string_formatter.py} | 6 +- 20 files changed, 636 insertions(+), 489 deletions(-) delete mode 100644 neps/space/neps_spaces/config_string.py delete mode 100644 neps/space/neps_spaces/operation_formatter.py create mode 100644 neps/space/neps_spaces/string_formatter.py rename tests/test_neps_space/{test_operation_formatter.py => test_string_formatter.py} (99%) diff --git a/README.md b/README.md index 5356de78f..a30c0cb9e 100644 --- a/README.md +++ b/README.md @@ -41,8 +41,8 @@ Using `neps` always follows the same pattern: 1. Define a `evaluate_pipeline` function capable of evaluating different architectural and/or hyperparameter configurations for your problem. -1. Define a search space named `pipeline_space` of those Parameters e.g. via a dictionary -1. Call `neps.run(evaluate_pipeline, pipeline_space)` +2. Define a `pipeline_space` of those Parameters +3. Call `neps.run(evaluate_pipeline, pipeline_space)` In code, the usage pattern can look like this: @@ -53,7 +53,7 @@ import logging logging.basicConfig(level=logging.INFO) # 1. 
Define a function that accepts hyperparameters and computes the validation error -def evaluate_pipeline(lr: float, alpha: int, optimizer: str) -> float: +def evaluate_pipeline(lr: float, alpha: int, optimizer: str): # Create your model model = MyModel(lr=lr, alpha=alpha, optimizer=optimizer) @@ -63,21 +63,20 @@ def evaluate_pipeline(lr: float, alpha: int, optimizer: str) -> float: # 2. Define a search space of parameters; use the same parameter names as in evaluate_pipeline -pipeline_space = dict( - lr=neps.Float( +class ExampleSpace(neps.PipelineSpace): + lr = neps.Float( lower=1e-5, upper=1e-1, log=True, # Log spaces - prior=1e-3, # Incorporate you knowledge to help optimization - ), - alpha=neps.Integer(lower=1, upper=42), - optimizer=neps.Categorical(choices=["sgd", "adam"]) -) + prior=1e-3, # Incorporate your knowledge to help optimization + ) + alpha = neps.Integer(lower=1, upper=42) + optimizer = neps.Categorical(choices=["sgd", "adam"]) # 3. Run the NePS optimization neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=pipeline_space, + pipeline_space=ExampleSpace(), root_directory="path/to/save/results", # Replace with the actual path. evaluations_to_spend=100, ) @@ -93,6 +92,8 @@ Discover how NePS works through these examples: - **[Utilizing Expert Priors for Hyperparameters](neps_examples/efficiency/expert_priors_for_hyperparameters.py)**: Learn how to incorporate expert priors for more efficient hyperparameter selection. +- **[Benefiting NePS State and Optimizers with custom runtime](neps_examples/experimental/ask_and_tell_example.py)**: Learn how to use AskAndTell, an advanced tool for leveraging optimizers and states while enabling a custom runtime for trial execution. + - **[Additional NePS Examples](neps_examples/)**: Explore more examples, including various use cases and advanced configurations in NePS. 
## Contributing diff --git a/docs/reference/neps_run.md b/docs/reference/neps_run.md index 291e0f5bf..9b725a955 100644 --- a/docs/reference/neps_run.md +++ b/docs/reference/neps_run.md @@ -108,7 +108,7 @@ If the run previously stopped due to reaching a budget and you specify the same !!! note "Auto-loading" - When continuing a run, NePS automatically loads the search space and optimizer configuration from disk. You don't need to specify `pipeline_space=` or `searcher=` again - NePS will use the saved settings from the original run. + When continuing a run, NePS automatically loads the search space and optimizer configuration from disk. You don't need to specify `pipeline_space=` or `optimizer=` again - NePS will use the saved settings from the original run. ## Reconstructing and Reproducing Runs @@ -138,7 +138,7 @@ neps.run( evaluate_pipeline=my_function, pipeline_space=pipeline_space, root_directory="path/to/new_run", - searcher=optimizer_info['name'], + optimizer=optimizer_info['name'], evaluations_to_spend=50, ) ``` diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index ae7189a33..faffb92be 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -59,16 +59,14 @@ For more details on how to use priors, see the [Priors](../reference/search_algo !!! info "Adding and removing parameters from **NePS Spaces**" - To add or remove parameters from a `PipelineSpace` after its definition, you can use the `+` operator or the `add()` and `remove()` methods. Mind you, these methods do NOT modify the existing space in-place, but return a new instance with the modifications: + To add or remove parameters from a `PipelineSpace` after its definition, you can use the `add()` and `remove()` methods. 
Mind you, these methods do NOT modify the existing space in-place, but return a new instance with the modifications: ```python space = MySpace() - # Adding a new parameter, this will appear as param_n where n is the next available index - space = space + neps.Float(lower=0.01, upper=0.1) - # Or using the add() method, this allows you to specify a name - space = space.add(neps.Integer(lower=5, upper=15), name="new_int_param") + # Adding a new parameter using add() + larger_space = space.add(neps.Integer(lower=5, upper=15), name="new_int_param") # Removing a parameter by its name - space = space.remove("cat_param") + smaller_space = space.remove("cat_param") ``` ## 3. Constructing Architecture Spaces diff --git a/neps/api.py b/neps/api.py index 3e4bdb58c..bbea06637 100644 --- a/neps/api.py +++ b/neps/api.py @@ -24,6 +24,7 @@ resolve, ) from neps.space.neps_spaces.parameters import Operation, PipelineSpace +from neps.space.neps_spaces.string_formatter import format_value from neps.space.parsing import convert_to_space from neps.state import NePSState, OptimizationState, SeedSnapshot from neps.status.status import post_run_csv @@ -439,6 +440,9 @@ def __call__( "'priorband', or 'complex_random_search'." 
) + # Log the search space after conversion + logger.info(str(space)) + _optimizer_ask, _optimizer_info = load_optimizer(optimizer=optimizer, space=space) multi_fidelity_optimizers = { @@ -555,7 +559,9 @@ def import_trials( # noqa: C901 OptimizerChoice | Mapping[str, Any] | tuple[OptimizerChoice, Mapping[str, Any]] - | Callable[Concatenate[SearchSpace, ...], AskFunction] + | Callable[Concatenate[SearchSpace, ...], AskFunction] # Hack, while we transit + | Callable[Concatenate[PipelineSpace, ...], AskFunction] # from SearchSpace to + | Callable[Concatenate[SearchSpace | PipelineSpace, ...], AskFunction] # Pipeline | CustomOptimizer | Literal["auto"] ) = "auto", @@ -701,31 +707,83 @@ def import_trials( # noqa: C901 state.lock_and_import_trials(imported_trials, worker_id="external") -def create_config( - pipeline_space: PipelineSpace, +def create_config( # noqa: C901 + pipeline_space: PipelineSpace | None = None, + root_directory: Path | str | None = None, ) -> tuple[Mapping[str, Any], dict[str, Any]]: """Create a configuration by prompting the user for input. Args: pipeline_space: The pipeline space to create a configuration for. + If None, will attempt to load from + `root_directory/pipeline_space.pkl` if `root_directory` is + provided. + root_directory: The root directory to load the pipeline space from + if `pipeline_space` is None. Returns: - A tuple containing the created configuration dictionary and the sampled pipeline. + A tuple containing the created configuration dictionary and the + sampled pipeline. 
""" from neps.space.neps_spaces.neps_space import NepsCompatConverter from neps.space.neps_spaces.sampling import IOSampler + # Try to load pipeline_space from disk if path is provided + if root_directory: + try: + loaded_space = load_pipeline_space(root_directory) + except (FileNotFoundError, ValueError) as e: + # If loading fails, we'll error below + raise ValueError( + f"Could not load pipeline space from disk at {root_directory}: {e}" + ) from e + # Validate loaded space is a PipelineSpace + if not isinstance(loaded_space, PipelineSpace): + raise ValueError( + "create_config only supports PipelineSpace. The loaded space " + f"from {root_directory} is not a PipelineSpace." + ) + + if pipeline_space is None: + pipeline_space = loaded_space + else: + # Validate provided pipeline_space is a PipelineSpace + if not isinstance(pipeline_space, PipelineSpace): + raise ValueError( + "create_config only supports PipelineSpace. The provided " + "pipeline_space is not a PipelineSpace." + ) + + # Validate provided pipeline_space matches loaded one + import pickle + + if pickle.dumps(loaded_space) != pickle.dumps(pipeline_space): + raise ValueError( + "The pipeline_space provided does not match the one saved on" + " disk.\nPipeline space location:" + f" {Path(root_directory) / 'pipeline_space.pkl'}\nPlease either:\n" + " 1. Don't provide pipeline_space (it will be loaded automatically)," + " or\n 2. Provide the same pipeline_space that was used in" + " neps.run()" + ) + elif pipeline_space is None: + raise ValueError( + "pipeline_space or root_directory is required when creating a configuration." + ) + resolved_pipeline, resolution_context = resolve( pipeline_space, domain_sampler=IOSampler() ) + # Print the resolved pipeline + pipeline_dict = dict(**resolved_pipeline.get_attrs()) for name, value in pipeline_dict.items(): if isinstance(value, Operation): # If the operator is a not a string, we convert it to a callable. 
if isinstance(value.operator, str): - pipeline_dict[name] = str(value) + pipeline_dict[name] = format_value(value) else: pipeline_dict[name] = convert_operation_to_callable(value) @@ -765,8 +823,15 @@ def load_config( # noqa: C901, PLR0912, PLR0915 root_dir = Path( str_path_temp.split("/configs/")[0].split("\\configs\\")[0] ) - else: + # If no /configs/ in path, assume it's either: + # 1. The root directory itself + # 2. A direct config file path (ends with .yaml/.yml) + elif str_path_temp.endswith((".yaml", ".yml")): + # It's a direct config file path, go up two levels root_dir = Path(str_path_temp).parent.parent + else: + # It's the root directory itself + root_dir = Path(str_path_temp) state = NePSState.create_or_load(path=root_dir, load_only=True) pipeline_space = state.lock_and_get_search_space() @@ -791,8 +856,15 @@ def load_config( # noqa: C901, PLR0912, PLR0915 root_dir = Path( str_path_temp.split("/configs/")[0].split("\\configs\\")[0] ) - else: + # If no /configs/ in path, assume it's either: + # 1. The root directory itself + # 2. A direct config file path (ends with .yaml/.yml) + elif str_path_temp.endswith((".yaml", ".yml")): + # It's a direct config file path, go up two levels root_dir = Path(str_path_temp).parent.parent + else: + # It's the root directory itself + root_dir = Path(str_path_temp) state = NePSState.create_or_load(path=root_dir, load_only=True) disk_space = state.lock_and_get_search_space() @@ -879,13 +951,15 @@ def load_config( # noqa: C901, PLR0912, PLR0915 environment_values=converted_dict.environment_values, ) + # Print the resolved pipeline + pipeline_dict = dict(**pipeline.get_attrs()) for name, value in pipeline_dict.items(): if isinstance(value, Operation): # If the operator is a not a string, we convert it to a callable. 
if isinstance(value.operator, str): - pipeline_dict[name] = str(value) + pipeline_dict[name] = format_value(value) else: pipeline_dict[name] = convert_operation_to_callable(value) @@ -976,7 +1050,7 @@ def load_optimizer_info( evaluate_pipeline=my_function, pipeline_space=MySpace(), root_directory="results", - searcher="bayesian_optimization", + optimizer="bayesian_optimization", ) # Later, check what optimizer was used diff --git a/neps/space/neps_spaces/config_string.py b/neps/space/neps_spaces/config_string.py deleted file mode 100644 index 9f5bfe583..000000000 --- a/neps/space/neps_spaces/config_string.py +++ /dev/null @@ -1,60 +0,0 @@ -"""This module provides functionality to format configuration strings -used in NePS spaces for display purposes. -""" - -from __future__ import annotations - -from typing import TYPE_CHECKING, Any - -from neps.space.neps_spaces.operation_formatter import operation_to_string - -if TYPE_CHECKING: - from neps.space.neps_spaces.parameters import Operation - - -class ConfigString: - """A class representing a configuration string in NePS spaces. - - This class provides pretty-formatted output for displaying Operation objects - to users. It uses the new operation_formatter module internally. - """ - - def __init__(self, config: str | Operation | Any) -> None: - """Initialize the ConfigString with a configuration. - - Args: - config: Either a string (for backward compatibility) or an Operation object - - Raises: - ValueError: If the config is None or empty. - """ - if config is None or (isinstance(config, str) and len(config) == 0): - raise ValueError(f"Invalid config: {config}") - - self.config = config - - def pretty_format(self) -> str: - """Get a pretty formatted string representation of the configuration. - - Returns: - A Pythonic multi-line string representation with proper indentation. 
- """ - from neps.space.neps_spaces.parameters import Operation - - if isinstance(self.config, Operation): - # Use the new formatter for Operation objects - return operation_to_string(self.config) - - # For string config (backward compatibility), just return as-is - return str(self.config) - - def __eq__(self, other: object) -> bool: - if isinstance(other, self.__class__): - return str(self.config) == str(other.config) - raise NotImplementedError() # let the other side check for equality - - def __ne__(self, other: object) -> bool: - return not self.__eq__(other) - - def __hash__(self) -> int: - return str(self.config).__hash__() diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 748c1d84a..022f11993 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -941,13 +941,13 @@ def convert_operation_to_string(operation: Operation | str | int | float) -> str Raises: ValueError: If the operation is not a valid Operation object. """ - from neps.space.neps_spaces.operation_formatter import operation_to_string + from neps.space.neps_spaces.string_formatter import format_value # Handle non-Operation values (resolved primitives) if not isinstance(operation, Operation): return str(operation) - return operation_to_string(operation) + return format_value(operation) # ------------------------------------------------- diff --git a/neps/space/neps_spaces/operation_formatter.py b/neps/space/neps_spaces/operation_formatter.py deleted file mode 100644 index f85c06f07..000000000 --- a/neps/space/neps_spaces/operation_formatter.py +++ /dev/null @@ -1,294 +0,0 @@ -"""Pretty formatting for Operation objects. - -This module provides functionality to convert Operation objects into -human-readable formatted strings. The format is Pythonic and preserves -all information including nested operations, lists, tuples, and dicts. 
-""" - -from __future__ import annotations - -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any - -if TYPE_CHECKING: - from neps.space.neps_spaces.parameters import Operation - - -@dataclass -class FormatterStyle: - """Configuration for the formatting style.""" - - indent_str: str = " " # Two spaces for indentation - max_line_length: int = 80 # Try to keep lines under this length - compact_threshold: int = 40 # Use compact format if repr is shorter - show_empty_args: bool = True # Show () for operations with no args/kwargs - - -def _format_value( - value: Any, - indent: int, - style: FormatterStyle, -) -> str: - """Format a value (could be primitive, list, tuple, dict, or Operation). - - Args: - value: The value to format - indent: Current indentation level - style: Formatting style configuration - - Returns: - Formatted string representation of the value - """ - from neps.space.neps_spaces.parameters import Operation - - if isinstance(value, Operation): - # Recursively format nested operations - return _format_operation(value, indent, style) - - if isinstance(value, list | tuple): - return _format_sequence(value, indent, style) - - if isinstance(value, dict): - return _format_dict(value, indent, style) - - # For callables (functions, methods), show their name instead of repr - if callable(value) and (name := getattr(value, "__name__", None)): - return name - - # For identifier strings, don't add quotes - if isinstance(value, str) and value.isidentifier(): - return value - - # For other values, use repr - return repr(value) - - -def _format_sequence( - seq: list | tuple, - indent: int, - style: FormatterStyle, -) -> str: - """Format a list or tuple, using compact or expanded format as needed.""" - from neps.space.neps_spaces.parameters import Operation - - if not seq: - return "[]" if isinstance(seq, list) else "()" - - # Try compact format first - compact = repr(seq) - if len(compact) <= style.compact_threshold and "\n" not in compact: - 
return compact - - # Use expanded format for complex sequences - is_list = isinstance(seq, list) - bracket_open, bracket_close = ("[", "]") if is_list else ("(", ")") - - indent_str = style.indent_str * indent - inner_indent_str = style.indent_str * (indent + 1) - - # Check if any element is an Operation (needs expansion) - has_operations = any(isinstance(item, Operation) for item in seq) - - if has_operations: - # Full expansion with each item on its own line - lines = [bracket_open] - for item in seq: - formatted = _format_value(item, indent + 1, style) - lines.append(f"{inner_indent_str}{formatted},") - lines.append(f"{indent_str}{bracket_close}") - return "\n".join(lines) - - # Simple items - try to fit multiple per line - lines = [bracket_open] - current_line: list[str] = [] - current_length = 0 - - for item in seq: - item_repr = repr(item) - item_len = len(item_repr) + 2 # +2 for ", " - - if current_line and current_length + item_len > style.max_line_length: - # Start new line - lines.append(f"{inner_indent_str}{', '.join(current_line)},") - current_line = [item_repr] - current_length = len(item_repr) - else: - current_line.append(item_repr) - current_length += item_len - - if current_line: - lines.append(f"{inner_indent_str}{', '.join(current_line)},") - - lines.append(f"{indent_str}{bracket_close}") - return "\n".join(lines) - - -def _format_dict( - d: dict, - indent: int, - style: FormatterStyle, -) -> str: - """Format a dictionary.""" - if not d: - return "{}" - - # Try compact format first - compact = repr(d) - if len(compact) <= style.compact_threshold: - return compact - - # Use expanded format - indent_str = style.indent_str * indent - inner_indent_str = style.indent_str * (indent + 1) - - lines = ["{"] - for key, value in d.items(): - formatted_value = _format_value(value, indent + 1, style) - lines.append(f"{inner_indent_str}{key!r}: {formatted_value},") - lines.append(f"{indent_str}}}") - return "\n".join(lines) - - -def _format_operation( - 
operation: Operation, - indent: int, - style: FormatterStyle, -) -> str: - """Format an Operation object. - - Args: - operation: The Operation to format - indent: Current indentation level - style: Formatting style configuration - - Returns: - Formatted string representation - """ - # Get operator name - operator_name = ( - operation.operator - if isinstance(operation.operator, str) - else operation.operator.__name__ - ) - - # Check if we have any args or kwargs - has_args = bool(operation.args) - has_kwargs = bool(operation.kwargs) - - if not (has_args or has_kwargs): - return f"{operator_name}()" if style.show_empty_args else operator_name - - # Always use multi-line format for consistency and readability - # Build the multi-line formatted string - indent_str = style.indent_str * indent - inner_indent_str = style.indent_str * (indent + 1) - - lines = [f"{operator_name}("] - - # Format args - if has_args: - for arg in operation.args: - formatted = _format_value(arg, indent + 1, style) - lines.append(f"{inner_indent_str}{formatted},") - - # Format kwargs - if has_kwargs: - for key, value in operation.kwargs.items(): - formatted_value = _format_value(value, indent + 1, style) - lines.append(f"{inner_indent_str}{key}={formatted_value},") - - lines.append(f"{indent_str})") - - return "\n".join(lines) - - -def operation_to_string( - operation: Operation | Any, - style: FormatterStyle | None = None, -) -> str: - """Convert an Operation to a pretty-formatted string. - - This function produces a Pythonic representation of the Operation - that preserves all information and is easy to read. - - Args: - operation: The Operation to format (or any value) - style: Formatting style configuration (uses default if None) - - Returns: - Pretty-formatted string representation - - Example: - >>> op = Operation( - ... operator=nn.Sequential, - ... args=( - ... Operation(nn.Conv2d, kwargs={'in_channels': 3, 'kernel_size': - ... [3, 3]}), - ... Operation(nn.ReLU), - ... ), - ... 
) - >>> print(operation_to_string(op)) - Sequential( - Conv2d( - in_channels=3, - kernel_size=[3, 3], - ), - ReLU, - ) - """ - from neps.space.neps_spaces.parameters import Operation - - if style is None: - style = FormatterStyle() - - if not isinstance(operation, Operation): - # Not an operation - just format the value - return _format_value(operation, 0, style) - - return _format_operation(operation, 0, style) - - -class ConfigString: - """A class representing a configuration string in NePS spaces. - - This class provides pretty-formatted output for displaying Operation objects - to users. It's a lightweight wrapper around operation_to_string for backward - compatibility. - """ - - def __init__(self, config: str | Operation | Any) -> None: - """Initialize the ConfigString with a configuration. - - Args: - config: Either a string (for backward compatibility) or an Operation object - - Raises: - ValueError: If the config is None or empty. - """ - if config is None or (isinstance(config, str) and not config): - raise ValueError(f"Invalid config: {config}") - - self.config = config - - def pretty_format(self) -> str: - """Get a pretty formatted string representation of the configuration. - - Returns: - A Pythonic multi-line string representation with proper indentation. 
- """ - from neps.space.neps_spaces.parameters import Operation - - if isinstance(self.config, Operation): - # Use the formatter for Operation objects - return operation_to_string(self.config) - - # For string config (backward compatibility), just return as-is - return str(self.config) - - def __eq__(self, other: object) -> bool: - if not isinstance(other, self.__class__): - return NotImplemented - return str(self.config) == str(other.config) - - def __hash__(self) -> int: - return hash(str(self.config)) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 2442bb9d8..3b59c603a 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -304,12 +304,10 @@ def __str__(self) -> str: A string representation of the pipeline, including its class name and attributes. """ - attrs = "\n\t".join( - f"{k} = {v!s}" - for k, v in self.get_attrs().items() - if not k.startswith("_") and not callable(v) - ) - return f"{self.__class__.__name__} with parameters:\n\t{attrs}" + from neps.space.neps_spaces.string_formatter import format_value + + # Delegate to the unified formatter + return format_value(self) def add( self, @@ -745,25 +743,9 @@ def __init__( def __str__(self) -> str: """Get a string representation of the categorical domain.""" - str_choices = [ - ( - choice.__name__ # type: ignore[union-attr] - if ( - callable(choice) - and not isinstance(choice, Resolvable) - and hasattr(choice, "__name__") - ) - else str(choice) - ) - for choice in self.choices # type: ignore[union-attr] - ] - string = f"Categorical(choices = ({', '.join(str_choices)}))" - if self.has_prior: - string += ( - f", prior = {self._prior}, prior_confidence = {self._prior_confidence}" - ) - string += ")" - return string + from neps.space.neps_spaces.string_formatter import format_value + + return format_value(self) def compare_domain_to(self, other: object) -> bool: """Check if this categorical parameter is equivalent to another. 
@@ -985,13 +967,9 @@ def __init__( def __str__(self) -> str: """Get a string representation of the floating-point domain.""" - string = f"Float({self._lower}, {self._upper}" - if self._log: - string += ", log" - if self.has_prior: - string += f", prior={self._prior}, prior_confidence={self._prior_confidence}" - string += ")" - return string + from neps.space.neps_spaces.string_formatter import format_value + + return format_value(self) def compare_domain_to(self, other: object) -> bool: """Check if this float parameter is equivalent to another. @@ -1219,13 +1197,9 @@ def __init__( def __str__(self) -> str: """Get a string representation of the integer domain.""" - string = f"Integer({self._lower}, {self._upper}" - if self._log: - string += ", log" - if self.has_prior: - string += f", prior={self._prior}, prior_confidence={self._prior_confidence}" - string += ")" - return string + from neps.space.neps_spaces.string_formatter import format_value + + return format_value(self) def compare_domain_to(self, other: object) -> bool: """Check if this integer parameter is equivalent to another. 
@@ -1429,32 +1403,9 @@ def __init__( def __str__(self) -> str: """Get a string representation of the operation.""" - if self._args != (): - args_str = "args=" - if isinstance(self._args, Resolvable): - args_str += str(self._args) - else: - args_str += "(" + ", ".join(str(arg) for arg in self._args) + ")" - else: - args_str = "" + from neps.space.neps_spaces.string_formatter import format_value - if self._kwargs != {}: - kwargs_str = "kwargs=" - if isinstance(self._kwargs, Resolvable): - kwargs_str += str(self._kwargs) - else: - kwargs_str += ( - "{" + ", ".join(f"{k}={v!s}" for k, v in self._kwargs.items()) + "}" - ) - else: - kwargs_str = "" - - args_str += ", " if args_str and kwargs_str else "" - - operator_name = ( - self._operator if isinstance(self._operator, str) else self._operator.__name__ - ) - return f"{operator_name}({args_str}{kwargs_str})" + return format_value(self) def compare_domain_to(self, other: object) -> bool: """Check if this operation parameter is equivalent to another. 
@@ -1575,7 +1526,9 @@ def __init__(self, source: Resolvable | str): self._source = source def __str__(self) -> str: - return f"Resampled({self._source!s})" + from neps.space.neps_spaces.string_formatter import format_value + + return format_value(self) @property def source(self) -> Resolvable | str: diff --git a/neps/space/neps_spaces/sampling.py b/neps/space/neps_spaces/sampling.py index fbc1f00d4..cc6b988f0 100644 --- a/neps/space/neps_spaces/sampling.py +++ b/neps/space/neps_spaces/sampling.py @@ -149,7 +149,7 @@ def __call__( class IOSampler(DomainSampler): """A sampler that samples by asking the user at each decision.""" - def __call__( + def __call__( # noqa: C901 self, *, domain_obj: Domain[T], @@ -172,8 +172,15 @@ def __call__( f" {domain_obj.upper}]: ", # type: ignore[attr-defined] ) elif isinstance(domain_obj, Categorical): + + def format_choice(choice: Any) -> str: + """Format a choice nicely, especially for callables.""" + if callable(choice) and (name := getattr(choice, "__name__", None)): + return name + return str(choice) + choices_list = "\n\t".join( - f"{n}: {choice!s}" + f"{n}: {format_choice(choice)}" for n, choice in enumerate(domain_obj.choices) # type: ignore[attr-defined, arg-type] ) max_index = int(domain_obj.range_compatibility_identifier) - 1 # type: ignore[attr-defined] diff --git a/neps/space/neps_spaces/string_formatter.py b/neps/space/neps_spaces/string_formatter.py new file mode 100644 index 000000000..158a7eb9c --- /dev/null +++ b/neps/space/neps_spaces/string_formatter.py @@ -0,0 +1,406 @@ +"""Pretty formatting for Operation objects. + +This module provides functionality to convert Operation objects into +human-readable formatted strings. The format is Pythonic and preserves +all information including nested operations, lists, tuples, and dicts. 
+ +ARCHITECTURE: + format_value() - Single entry point for ALL formatting + ├── _format_categorical() - Internal handler for Categorical + ├── _format_float() - Internal handler for Float + ├── _format_integer() - Internal handler for Integer + ├── _format_resampled() - Internal handler for Resampled + ├── _format_repeated() - Internal handler for Repeated + ├── _format_operation() - Internal handler for Operation + ├── _format_sequence() - Internal handler for list/tuple + └── _format_dict() - Internal handler for dict + +All __str__ methods should call format_value() directly. +All internal formatters call format_value() for nested values. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from neps.space.neps_spaces.parameters import ( + Categorical, + Float, + Integer, + Operation, + Repeated, + Resampled, + ) + + +@dataclass +class FormatterStyle: + """Configuration for the formatting style.""" + + indent_str: str = " " # Three spaces for indentation + max_line_length: int = 80 # Try to keep lines under this length + compact_threshold: int = 40 # Use compact format if repr is shorter + show_empty_args: bool = True # Show () for operations with no args/kwargs + + +# ============================================================================ +# PUBLIC API - Single entry point for all formatting +# ============================================================================ + + +def format_value( # noqa: C901, PLR0911 + value: Any, + indent: int = 0, + style: FormatterStyle | None = None, +) -> str: + """Format any value with proper indentation and style. + + This is the SINGLE entry point for all formatting in NePS. + All __str__ methods should delegate to this function. 
+ + Args: + value: The value to format (any type) + indent: Current indentation level + style: Formatting style configuration + + Returns: + Formatted string representation + """ + from neps.space.neps_spaces.parameters import ( + Categorical, + Float, + Integer, + Operation, + Repeated, + Resampled, + ) + + if style is None: + style = FormatterStyle() + + # Dispatch to appropriate internal formatter based on type + if isinstance(value, Operation): + return _format_operation(value, indent, style) + + if isinstance(value, Categorical): + return _format_categorical(value, indent, style) + + if isinstance(value, Float): + return _format_float(value, indent, style) + + if isinstance(value, Integer): + return _format_integer(value, indent, style) + + if isinstance(value, Resampled): + return _format_resampled(value, indent, style) + + if isinstance(value, Repeated): + return _format_repeated(value, indent, style) + + if isinstance(value, list | tuple): + return _format_sequence(value, indent, style) + + if isinstance(value, dict): + return _format_dict(value, indent, style) + + # Check for PipelineSpace (import here to avoid circular dependency) + from neps.space.neps_spaces.parameters import PipelineSpace + + if isinstance(value, PipelineSpace): + return _format_pipeline_space(value, indent, style) + + # For callables (functions, methods), show their name + if callable(value) and (name := getattr(value, "__name__", None)): + return name + + # For identifier strings, don't add quotes + if isinstance(value, str) and value.isidentifier(): + return value + + # For other values, use repr + return repr(value) + + +# ============================================================================ +# INTERNAL FORMATTERS - Type-specific formatting logic +# All these call format_value() for nested values to maintain consistency +# ============================================================================ + + +def _format_categorical( + categorical: Categorical, + indent: int, + 
style: FormatterStyle, +) -> str: + """Internal formatter for Categorical parameters.""" + indent_str = style.indent_str * indent + inner_indent_str = style.indent_str * (indent + 1) + + # Format each choice using format_value for consistency + formatted_choices = [] + for choice in categorical.choices: # type: ignore[union-attr] + choice_str = format_value(choice, indent + 2, style) + formatted_choices.append(choice_str) + + # Build the string with consistent indentation + choice_indent_str = style.indent_str * (indent + 2) + choices_str = f",\n{choice_indent_str}".join(formatted_choices) + result = ( + f"Categorical(\n{inner_indent_str}choices=" + f"(\n{choice_indent_str}{choices_str}\n{inner_indent_str})" + ) + + if categorical.has_prior: + prior_confidence_str = ( + categorical._prior_confidence.value + if hasattr(categorical._prior_confidence, "value") + else str(categorical._prior_confidence) + ) + result += ( + f",\n{inner_indent_str}prior={categorical._prior}," + f"\n{inner_indent_str}prior_confidence={prior_confidence_str}" + ) + + result += f"\n{indent_str})" + return result + + +def _format_float( + float_param: Float, + indent: int, # noqa: ARG001 + style: FormatterStyle, # noqa: ARG001 +) -> str: + """Internal formatter for Float parameters.""" + string = f"Float({float_param._lower}, {float_param._upper}" + if float_param._log: + string += ", log" + if float_param.has_prior: + prior_confidence_str = ( + float_param._prior_confidence.value + if hasattr(float_param._prior_confidence, "value") + else str(float_param._prior_confidence) + ) + string += f", prior={float_param._prior}, prior_confidence={prior_confidence_str}" + string += ")" + return string + + +def _format_integer( + integer_param: Integer, + indent: int, # noqa: ARG001 + style: FormatterStyle, # noqa: ARG001 +) -> str: + """Internal formatter for Integer parameters.""" + string = f"Integer({integer_param._lower}, {integer_param._upper}" + if integer_param._log: + string += ", log" + if 
integer_param.has_prior: + prior_confidence_str = ( + integer_param._prior_confidence.value + if hasattr(integer_param._prior_confidence, "value") + else str(integer_param._prior_confidence) + ) + string += ( + f", prior={integer_param._prior}, prior_confidence={prior_confidence_str}" + ) + string += ")" + return string + + +def _format_resampled( + resampled: Resampled, + indent: int, + style: FormatterStyle, +) -> str: + """Internal formatter for Resampled parameters.""" + source = resampled._source + + # Format the source using unified format_value + source_str = format_value(source, indent + 1, style) + + # Use multi-line format if source is multi-line + if "\n" in source_str: + indent_str = style.indent_str * indent + inner_indent_str = style.indent_str * (indent + 1) + return f"Resampled(\n{inner_indent_str}{source_str}\n{indent_str})" + + # Simple single-line format for basic types + return f"Resampled({source_str})" + + +def _format_repeated( + repeated: Repeated, + indent: int, + style: FormatterStyle, +) -> str: + """Internal formatter for Repeated parameters.""" + source_str = format_value(repeated._content, indent, style) + return f"Repeated({source_str})" + + +def _format_sequence( + seq: list | tuple, + indent: int, + style: FormatterStyle, +) -> str: + """Internal formatter for lists and tuples.""" + from neps.space.neps_spaces.parameters import Operation + + if not seq: + return "[]" if isinstance(seq, list) else "()" + + # Try compact format first + compact = repr(seq) + if len(compact) <= style.compact_threshold and "\n" not in compact: + return compact + + # Use expanded format for complex sequences + is_list = isinstance(seq, list) + bracket_open, bracket_close = ("[", "]") if is_list else ("(", ")") + + indent_str = style.indent_str * indent + inner_indent_str = style.indent_str * (indent + 1) + + # Check if any element is an Operation (needs expansion) + has_operations = any(isinstance(item, Operation) for item in seq) + + if has_operations: + 
# Full expansion with each item on its own line + lines = [bracket_open] + for item in seq: + formatted = format_value(item, indent + 1, style) + lines.append(f"{inner_indent_str}{formatted},") + lines.append(f"{indent_str}{bracket_close}") + return "\n".join(lines) + + # Simple items - try to fit multiple per line + lines = [bracket_open] + current_line: list[str] = [] + current_length = 0 + + for item in seq: + item_repr = repr(item) + item_len = len(item_repr) + 2 # +2 for ", " + + if current_line and current_length + item_len > style.max_line_length: + # Start new line + lines.append(f"{inner_indent_str}{', '.join(current_line)},") + current_line = [item_repr] + current_length = len(item_repr) + else: + current_line.append(item_repr) + current_length += item_len + + if current_line: + lines.append(f"{inner_indent_str}{', '.join(current_line)},") + + lines.append(f"{indent_str}{bracket_close}") + return "\n".join(lines) + + +def _format_dict( + d: dict, + indent: int, + style: FormatterStyle, +) -> str: + """Internal formatter for dictionaries.""" + if not d: + return "{}" + + # Try compact format first + compact = repr(d) + if len(compact) <= style.compact_threshold: + return compact + + # Use expanded format + indent_str = style.indent_str * indent + inner_indent_str = style.indent_str * (indent + 1) + + lines = ["{"] + for key, value in d.items(): + formatted_value = format_value(value, indent + 1, style) + lines.append(f"{inner_indent_str}{key!r}: {formatted_value},") + lines.append(f"{indent_str}}}") + return "\n".join(lines) + + +def _format_operation( + operation: Operation, + indent: int, + style: FormatterStyle, +) -> str: + """Internal formatter for Operation objects.""" + # Get operator name + operator_name = ( + operation.operator + if isinstance(operation.operator, str) + else operation.operator.__name__ + ) + + # Check if we have any args or kwargs + has_args = bool(operation.args) + has_kwargs = bool(operation.kwargs) + + if not (has_args or 
has_kwargs): + return f"{operator_name}()" if style.show_empty_args else operator_name + + # Always use multi-line format for consistency + indent_str = style.indent_str * indent + inner_indent_str = style.indent_str * (indent + 1) + + lines = [f"{operator_name}("] + + # Format args using format_value + if has_args: + for arg in operation.args: + formatted = format_value(arg, indent + 1, style) + lines.append(f"{inner_indent_str}{formatted},") + + # Format kwargs using format_value + if has_kwargs: + for key, value in operation.kwargs.items(): + formatted_value = format_value(value, indent + 1, style) + lines.append(f"{inner_indent_str}{key}={formatted_value},") + + lines.append(f"{indent_str})") + + return "\n".join(lines) + + +def _format_pipeline_space( + pipeline_space: Any, + indent: int, # noqa: ARG001 + style: FormatterStyle, +) -> str: + """Internal formatter for PipelineSpace objects.""" + lines = [f"{pipeline_space.__class__.__name__} with parameters:"] + for k, v in pipeline_space.get_attrs().items(): + if not k.startswith("_") and not callable(v): + # Use the unified formatter for all values + formatted_value = format_value(v, 0, style) + # If multi-line, indent all lines + if "\n" in formatted_value: + indented_value = "\n ".join(formatted_value.split("\n")) + lines.append(f" {k}:\n {indented_value}") + else: + lines.append(f" {k} = {formatted_value}") + return "\n".join(lines) + + +# ============================================================================ +# BACKWARD COMPATIBILITY - Legacy function names +# ============================================================================ + + +def operation_to_string( + operation: Operation | Any, + style: FormatterStyle | None = None, +) -> str: + """Convert an Operation to a pretty-formatted string. + + Legacy function for backward compatibility. + New code should use format_value() directly. 
+ """ + return format_value(operation, 0, style) diff --git a/neps/status/status.py b/neps/status/status.py index 2843d711c..f5ab360b3 100644 --- a/neps/status/status.py +++ b/neps/status/status.py @@ -19,8 +19,8 @@ from neps.space.neps_spaces import neps_space from neps.space.neps_spaces.neps_space import NepsCompatConverter, PipelineSpace -from neps.space.neps_spaces.operation_formatter import ConfigString from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler +from neps.space.neps_spaces.string_formatter import format_value from neps.state.neps_state import FileLocker, NePSState from neps.state.trial import State, Trial @@ -235,7 +235,7 @@ def formatted( # noqa: PLR0912 for variable in variables: operation = getattr(resolved_pipeline, variable) - pipeline_configs.append(ConfigString(operation).pretty_format()) + pipeline_configs.append(format_value(operation)) for n_pipeline, pipeline_config in enumerate(pipeline_configs): formatted_config = str(pipeline_config) diff --git a/neps_examples/basic_usage/1_hyperparameters.py b/neps_examples/basic_usage/1_hyperparameters.py index 871863e6d..3383aec1a 100644 --- a/neps_examples/basic_usage/1_hyperparameters.py +++ b/neps_examples/basic_usage/1_hyperparameters.py @@ -9,12 +9,15 @@ import numpy as np import neps + def evaluate_pipeline(float1, float2, categorical, integer1, integer2): objective_to_minimize = -float( np.sum([float1, float2, int(categorical), integer1, integer2]) ) - return {"objective_to_minimize": objective_to_minimize, "cost": categorical,} - + return { + "objective_to_minimize": objective_to_minimize, + "cost": categorical, + } class HPOSpace(neps.PipelineSpace): @@ -31,5 +34,4 @@ class HPOSpace(neps.PipelineSpace): pipeline_space=HPOSpace(), root_directory="results/hyperparameters_example", evaluations_to_spend=5, - overwrite_root_directory=True, ) diff --git a/neps_examples/basic_usage/3_architecture_search.py b/neps_examples/basic_usage/3_architecture_search.py index 
75702dbbd..c302628a4 100644 --- a/neps_examples/basic_usage/3_architecture_search.py +++ b/neps_examples/basic_usage/3_architecture_search.py @@ -1,22 +1,41 @@ """ -This example demonstrates the full capabilities of NePS Spaces -by defining a neural network architecture using PyTorch modules. -It showcases how to interact with the NePS Spaces API to create, -sample and evaluate a neural network pipeline. -It also demonstrates how to convert the pipeline to a callable -and how to run NePS with the defined pipeline and space. +This example demonstrates neural architecture search using NePS Spaces to define and +optimize PyTorch models. The search space consists of a 3-cell sequential architecture +where each cell contains a Conv2d layer followed by an activation function. The Conv2d +kernel size is sampled from integers in [2, 7], and the activation is chosen from +{ReLU, Sigmoid, Tanh}. Each cell independently samples its kernel size and activation, +allowing NePS to explore diverse architectural configurations and find optimal designs. + +Search Space Structure: + model: Sequential( + Cell_1: Sequential( + Conv2d(kernel_size=, ...), + + ), + Cell_2: Sequential( + Conv2d(kernel_size=, ...), + + ), + Cell_3: Sequential( + Conv2d(kernel_size=, ...), + + ) + ) """ import numpy as np import torch import torch.nn as nn import neps +import logging # Define the NEPS space for the neural network architecture # It reuses the same building blocks multiple times, with different sampled parameters. 
class NN_Space(neps.PipelineSpace): + # Parameters with prefixed _ are internal and will not be given to the evaluation + # function _kernel_size = neps.Integer(2, 7) # Building blocks of the neural network architecture @@ -50,6 +69,7 @@ class NN_Space(neps.PipelineSpace): ) # The full model consisting of three cells stacked sequentially + # This will be given to the evaluation function as 'model' model = neps.Operation( operator=nn.Sequential, args=( @@ -71,12 +91,12 @@ def evaluate_pipeline(model: torch.nn.Module) -> float: if __name__ == "__main__": # Run NePS with the defined pipeline and space and show the best configuration pipeline_space = NN_Space() + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, pipeline_space=pipeline_space, root_directory="results/architecture_search_example", evaluations_to_spend=5, - overwrite_root_directory=True, ) neps.status( "results/architecture_search_example", diff --git a/neps_examples/basic_usage/4_architecture_and_hyperparameters.py b/neps_examples/basic_usage/4_architecture_and_hyperparameters.py index 43192d30b..446c5a391 100644 --- a/neps_examples/basic_usage/4_architecture_and_hyperparameters.py +++ b/neps_examples/basic_usage/4_architecture_and_hyperparameters.py @@ -1,16 +1,43 @@ """ -This example demonstrates how to combine neural network architecture -search with hyperparameter optimization using NePS. +This example demonstrates joint optimization of neural architecture and hyperparameters +using NePS Spaces. The search space includes: (1) a 3-cell sequential architecture with +Conv2d layers (kernel size sampled from [2, 7]) and activations chosen from {ReLU, +Sigmoid, Tanh}, and (2) a batch_size hyperparameter sampled from integers in [16, 128]. +NePS simultaneously optimizes both architectural choices and training hyperparameters. 
+ +Search Space Structure: + batch_size: + model: Sequential( + Cell_1: Sequential( + Conv2d(kernel_size=, ...), + + ), + Cell_2: Sequential( + Conv2d(kernel_size=, ...), + + ), + Cell_3: Sequential( + Conv2d(kernel_size=, ...), + + ) + ) """ import neps import torch import numpy as np from torch import nn +import logging + # Using the space from the architecture search example class NN_Space(neps.PipelineSpace): + # Integer Hyperparameter for the batch size + batch_size = neps.Integer(16, 128) + + # Parameters with prefixed _ are internal and will not be given to the evaluation + # function _kernel_size = neps.Integer(2, 7) # Building blocks of the neural network architecture @@ -44,6 +71,7 @@ class NN_Space(neps.PipelineSpace): ) # The full model consisting of three cells stacked sequentially + # This will be given to the evaluation function as 'model' model = neps.Operation( operator=nn.Sequential, args=( @@ -54,9 +82,6 @@ class NN_Space(neps.PipelineSpace): ) -# Extend the architecture search space with a hyperparameter -extended_space = NN_Space().add(neps.Integer(16, 128), name="batch_size") - def evaluate_pipeline(model: torch.nn.Module, batch_size: int) -> float: # For demonstration, we return a dummy objective value # In practice, you would train and evaluate the model here @@ -68,12 +93,12 @@ def evaluate_pipeline(model: torch.nn.Module, batch_size: int) -> float: if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, - pipeline_space=extended_space, + pipeline_space=NN_Space(), root_directory="results/architecture_with_hp_example", evaluations_to_spend=5, - overwrite_root_directory=True, ) neps.status( root_directory="results/architecture_with_hp_example", diff --git a/neps_examples/basic_usage/5_optimizer_search.py b/neps_examples/basic_usage/5_optimizer_search.py index f179c634b..8ea3f70e6 100644 --- a/neps_examples/basic_usage/5_optimizer_search.py +++ 
b/neps_examples/basic_usage/5_optimizer_search.py @@ -1,12 +1,25 @@ """ -This example demonstrates how to use NePS to search for an optimizer. We define -a search space that samples different configurations of a simple custom optimizer -built using PyTorch. The pipeline optimizes a simple quadratic function using the -sampled optimizer. NePS is then run to find the best optimizer configuration. +This example demonstrates optimizer search using NePS to design custom PyTorch optimizers +and find the best configuration. The search space defines a custom optimizer with three +gradient transformation functions sampled from {sqrt, log, exp, sign, abs}, a learning +rate sampled logarithmically from [0.0001, 0.01], and gradient clipping from [0.5, 1.0]. +NePS evaluates each optimizer by optimizing a quadratic function to discover the most +effective combination of gradient transformations and hyperparameters for convergence. + +Search Space Structure: + optimizer_class: optimizer_constructor( + , + , + , + learning_rate=, + gradient_clipping= + ) """ import neps import torch +import logging + def optimizer_constructor(*functions, gradient_clipping: float, learning_rate: float): # Build a simple optimizer that applies a sequence of functions to the gradients @@ -38,6 +51,9 @@ def step(self, _closure=None): # The search space defines the optimizer class constructed with sampled hyperparameters # and functions class OptimizerSpace(neps.PipelineSpace): + + # Parameters with prefixed _ are internal and will not be given to the evaluation + # function _gradient_clipping = neps.Float(0.5, 1.0) _learning_rate = neps.Float(0.0001, 0.01, log=True) @@ -45,7 +61,7 @@ class OptimizerSpace(neps.PipelineSpace): choices=(torch.sqrt, torch.log, torch.exp, torch.sign, torch.abs) ) - + # The optimizer class constructed with the sampled functions and hyperparameters optimizer_class = neps.Operation( operator=optimizer_constructor, args=( @@ -78,12 +94,13 @@ def 
evaluate_pipeline(optimizer_class) -> float: # Run NePS with the defined pipeline and space and show the best configuration if __name__ == "__main__": pipeline_space = OptimizerSpace() + + logging.basicConfig(level=logging.INFO) neps.run( evaluate_pipeline=evaluate_pipeline, pipeline_space=pipeline_space, root_directory="results/optimizer_search_example", evaluations_to_spend=5, - overwrite_root_directory=True, ) neps.status( root_directory="results/optimizer_search_example", diff --git a/neps_examples/convenience/create_and_import_custom_config.py b/neps_examples/convenience/create_and_import_custom_config.py index aebc41484..ef8031af0 100644 --- a/neps_examples/convenience/create_and_import_custom_config.py +++ b/neps_examples/convenience/create_and_import_custom_config.py @@ -30,8 +30,6 @@ class ExampleSpace(neps.PipelineSpace): config, pipeline = neps.create_config(ExampleSpace()) print("Created configuration:") pprint(config) - print("Sampled pipeline:") - pprint(pipeline) logging.basicConfig(level=logging.INFO) diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index a83557bf8..86cdd92a0 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -3,7 +3,7 @@ import pytest import neps.space.neps_spaces.sampling -from neps.space.neps_spaces import neps_space, operation_formatter +from neps.space.neps_spaces import neps_space, string_formatter from neps.space.neps_spaces.parameters import ( Categorical, Operation, @@ -176,7 +176,7 @@ def test_resolve(): s = resolved_pipeline.S s_config_string = neps_space.convert_operation_to_string(s) assert s_config_string - pretty_config = operation_formatter.ConfigString(s_config_string).pretty_format() + pretty_config = string_formatter.format_value(s) assert pretty_config @@ -192,7 +192,7 @@ def test_resolve_alt(): s = resolved_pipeline.S s_config_string = 
neps_space.convert_operation_to_string(s) assert s_config_string - pretty_config = operation_formatter.ConfigString(s_config_string).pretty_format() + pretty_config = string_formatter.format_value(s) assert pretty_config diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index 9c0bd5cc1..07a5c14ba 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -3,7 +3,7 @@ import pytest import neps.space.neps_spaces.sampling -from neps.space.neps_spaces import neps_space, operation_formatter +from neps.space.neps_spaces import neps_space, string_formatter from neps.space.neps_spaces.parameters import ( Categorical, Float, @@ -219,13 +219,13 @@ def test_hnas_like_string(): arch = resolved_pipeline.ARCH arch_config_string = neps_space.convert_operation_to_string(arch) assert arch_config_string - pretty_config = operation_formatter.ConfigString(arch_config_string).pretty_format() + pretty_config = string_formatter.format_value(arch) assert pretty_config cl = resolved_pipeline.CL cl_config_string = neps_space.convert_operation_to_string(cl) assert cl_config_string - pretty_config = operation_formatter.ConfigString(cl_config_string).pretty_format() + pretty_config = string_formatter.format_value(cl) assert pretty_config diff --git a/tests/test_neps_space/test_search_space__nos_like.py b/tests/test_neps_space/test_search_space__nos_like.py index 49a979a37..9dd784e94 100644 --- a/tests/test_neps_space/test_search_space__nos_like.py +++ b/tests/test_neps_space/test_search_space__nos_like.py @@ -2,7 +2,7 @@ import pytest -from neps.space.neps_spaces import neps_space, operation_formatter +from neps.space.neps_spaces import neps_space, string_formatter from neps.space.neps_spaces.parameters import ( Categorical, Integer, @@ -129,5 +129,5 @@ def test_resolve(): p = resolved_pipeline.P p_config_string = 
neps_space.convert_operation_to_string(p) assert p_config_string - pretty_config = operation_formatter.ConfigString(p_config_string).pretty_format() + pretty_config = string_formatter.format_value(p) assert pretty_config diff --git a/tests/test_neps_space/test_operation_formatter.py b/tests/test_neps_space/test_string_formatter.py similarity index 99% rename from tests/test_neps_space/test_operation_formatter.py rename to tests/test_neps_space/test_string_formatter.py index 7d6169cc6..e022a5500 100644 --- a/tests/test_neps_space/test_operation_formatter.py +++ b/tests/test_neps_space/test_string_formatter.py @@ -1,13 +1,13 @@ -"""Comprehensive tests for operation_formatter module.""" +"""Comprehensive tests for string_formatter module.""" from __future__ import annotations import neps -from neps.space.neps_spaces.operation_formatter import ( +from neps.space.neps_spaces.parameters import Operation +from neps.space.neps_spaces.string_formatter import ( FormatterStyle, operation_to_string, ) -from neps.space.neps_spaces.parameters import Operation def test_simple_operation_no_args(): From ba314fdeaf6bafe5b72ee2460cabc0c3905bf85e Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 28 Nov 2025 12:46:44 +0100 Subject: [PATCH 135/156] Refactor string formatting and resampling in NePS - Introduced a helper function `_format_prior_confidence` for consistent formatting of prior confidence values in `string_formatter.py`. - Updated various instances in the codebase to utilize the new `_format_prior_confidence` function, improving code readability and maintainability. - Replaced `neps.Resampled(...)` calls with the new `.resample()` method across multiple example scripts and test files, ensuring consistency in how resampling is handled. - Cleaned up formatting in test cases to enhance clarity and maintain a consistent style. - Adjusted the formatting of operation strings in `test_string_formatter.py` to ensure proper indentation and alignment. 
--- docs/reference/neps_spaces.md | 24 +- neps/space/neps_spaces/neps_space.py | 2 +- neps/space/neps_spaces/parameters.py | 215 ++++++++++-------- neps/space/neps_spaces/string_formatter.py | 34 +-- .../basic_usage/3_architecture_search.py | 12 +- .../4_architecture_and_hyperparameters.py | 12 +- .../basic_usage/5_optimizer_search.py | 10 +- .../create_and_import_custom_config.py | 5 +- .../test_neps_space/test_neps_integration.py | 15 +- .../test_pipeline_space_methods.py | 2 +- .../test_search_space__grammar_like.py | 80 +++---- .../test_search_space__hnas_like.py | 75 +++--- .../test_search_space__nos_like.py | 42 ++-- .../test_search_space__recursion.py | 2 +- .../test_search_space__resampled.py | 23 +- ...test_space_conversion_and_compatibility.py | 3 +- .../test_neps_space/test_string_formatter.py | 86 +++---- 17 files changed, 335 insertions(+), 307 deletions(-) diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index faffb92be..b89600a2c 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -84,8 +84,8 @@ import torch.nn class NNSpace(PipelineSpace): # Defining operations for different activation functions - _relu = Operation(operator=torch.nn.ReLU) - _sigmoid = Operation(operator=torch.nn.Sigmoid) + _relu = neps.Operation(operator=torch.nn.ReLU) + _sigmoid = neps.Operation(operator=torch.nn.Sigmoid) # We can then search over these operations and use them in the evaluation function activation_function = neps.Categorical(choices=(_relu, _sigmoid)) @@ -148,9 +148,9 @@ def evaluate_pipeline( Until now all parameters are sampled once and their value used for all occurrences. 
This section describes how to resample parameters in different contexts using: -- [`neps.Resampled`][neps.space.neps_spaces.parameters.Resampled]: Resample from an existing parameters range +- [`.resample()`][neps.space.neps_spaces.parameters.Resampled]: Resample from an existing parameters range -With `neps.Resampled` you can reuse a parameter, even themselves recursively, but with a new value each time: +With `.resample()` you can reuse a parameter, even themselves recursively, but with a new value each time: ```python class ResampledSpace(neps.PipelineSpace): @@ -158,7 +158,7 @@ class ResampledSpace(neps.PipelineSpace): # The resampled parameter will have the same range but will be sampled # independently, so it can take a different value than its source - resampled_float = neps.Resampled(source=float_param) + resampled_float = float_param.resample() ``` This is especially useful for defining complex architectures, where e.g. a cell block is defined and then resampled multiple times to create a neural network architecture: @@ -172,14 +172,14 @@ class CNN_Space(neps.PipelineSpace): # Each instance will be identically but independently sampled _cell_block = neps.Operation( operator=torch.nn.Conv2d, - kwargs={"kernel_size": neps.Resampled(source=_kernel_size)} + kwargs={"kernel_size": _kernel_size.resample()} ) # Resample the cell block multiple times to create a convolutional neural network cnn = torch.nn.Sequential( - neps.Resampled(_cell_block), - neps.Resampled(_cell_block), - neps.Resampled(_cell_block), + _cell_block.resample(), + _cell_block.resample(), + _cell_block.resample(), ) def evaluate_pipeline(cnn: torch.nn.Module): @@ -190,15 +190,15 @@ def evaluate_pipeline(cnn: torch.nn.Module): ??? 
info "Self- and future references" - When referencing itself or a not yet defined parameter (to enable recursions) use a string of that parameters name: + When referencing itself or a not yet defined parameter (to enable recursions) use a string of that parameters name with `neps.Resampled("parameter_name")`, like so: ```python self_reference = Categorical( choices=( # It will either choose to resample itself twice - (Resampled("self_reference"), Resampled("self_reference")), + (neps.Resampled("self_reference"), neps.Resampled("self_reference")), # Or it will sample the future parameter - (Resampled("future_param"),), + (neps.Resampled("future_param"),), ) ) # This results in a (possibly infinite) tuple of independently sampled future_params diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 022f11993..91c3bbcc4 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -619,7 +619,7 @@ def _( # That will then be the object to resample. referenced_obj_name = cast(str, resampled_obj.source) referenced_obj = getattr(context.resolution_root, referenced_obj_name) - resampled_obj = Resampled(referenced_obj) + resampled_obj = referenced_obj.resample() initial_attrs = resampled_obj.get_attrs() resolvable_to_resample_obj = resampled_obj.from_attrs(initial_attrs) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 3b59c603a..e6ed7f133 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -24,6 +24,37 @@ T = TypeVar("T") +# Shared docstring constants for DRY +_RESAMPLE_DOCSTRING = """Wrap this {type_name} in a Resampled container. + +This allows resampling the {type_name} each time it's resolved, useful for +creating dynamic structures where the same {description} is sampled multiple +times independently. + +Returns: + A Resampled instance wrapping this {type_name}. 
+ +Example: + ```python + # Instead of: neps.Resampled(my_{type_lower}) + # You can write: my_{type_lower}.resample() + ``` +""" + +_COMPARE_DOMAIN_DOCSTRING = """Check if this {type_name} parameter is equivalent to \ +another. + +This method provides comparison logic without interfering with Python's +object identity system (unlike __eq__). Use this for functional comparisons +like checking if parameters have the same configuration. + +Args: + other: The object to compare with. + +Returns: + True if the objects are equivalent, False otherwise. +""" + class _Unset: def __repr__(self) -> str: @@ -150,19 +181,8 @@ def __str__(self) -> str: """Get a string representation of the fidelity.""" return f"Fidelity({self.domain.__str__()})" - def compare_domain_to(self, other: object) -> bool: - """Check if this fidelity parameter is equivalent to another. - - This method provides comparison logic without interfering with Python's - object identity system (unlike __eq__). Use this for functional comparisons - like checking if parameters have the same configuration. - - Args: - other: The object to compare with. - - Returns: - True if the objects are equivalent, False otherwise. - """ + def compare_domain_to(self, other: object) -> bool: # noqa: D102 + # Docstring set dynamically below if not isinstance(other, Fidelity): return False return self.domain == other.domain @@ -376,7 +396,7 @@ def add( module = sys.modules[NewSpace.__module__] setattr(module, new_class_name, NewSpace) - return cast(PipelineSpace, NewSpace()) + return cast("PipelineSpace", NewSpace()) def remove(self, name: str) -> PipelineSpace: """Remove a parameter from the pipeline by its name. 
This is NOT an in-place @@ -417,7 +437,7 @@ def remove(self, name: str) -> PipelineSpace: module = sys.modules[NewSpace.__module__] setattr(module, new_class_name, NewSpace) - return cast(PipelineSpace, NewSpace()) + return cast("PipelineSpace", NewSpace()) def add_prior( self, @@ -483,7 +503,7 @@ def add_prior( module = sys.modules[NewSpace.__module__] setattr(module, new_class_name, NewSpace) - return cast(PipelineSpace, NewSpace()) + return cast("PipelineSpace", NewSpace()) class ConfidenceLevel(enum.Enum): @@ -620,6 +640,24 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Domain[T]: """ return type(self)(**attrs) + def resample(self) -> Resampled: + """Wrap this domain in a Resampled container. + + This allows resampling the domain each time it's resolved, useful for + creating dynamic structures where the same parameter definition is + sampled multiple times independently. + + Returns: + A Resampled instance wrapping this domain. + + Example: + ```python + # Instead of: neps.Resampled(neps.Integer(1, 10)) + # You can write: neps.Integer(1, 10).resample() + ``` + """ + return Resampled(self) + def _calculate_new_domain_bounds( number_type: type[int] | type[float], @@ -747,19 +785,8 @@ def __str__(self) -> str: return format_value(self) - def compare_domain_to(self, other: object) -> bool: - """Check if this categorical parameter is equivalent to another. - - This method provides comparison logic without interfering with Python's - object identity system (unlike __eq__). Use this for functional comparisons - like checking if parameters have the same configuration. - - Args: - other: The object to compare with. - - Returns: - True if the objects are equivalent, False otherwise. - """ + def compare_domain_to(self, other: object) -> bool: # noqa: D102 + # Docstring set dynamically below if not isinstance(other, Categorical): return False return ( @@ -787,7 +814,7 @@ def upper(self) -> int: minus one. 
""" - return max(len(cast(tuple, self._choices)) - 1, 0) + return max(len(cast("tuple", self._choices)) - 1, 0) @property def choices(self) -> tuple[T | Domain[T] | Resolvable, ...] | Domain[T]: @@ -825,7 +852,7 @@ def prior(self) -> int: """ if not self.has_prior: raise ValueError("Domain has no prior and prior_confidence defined.") - return int(cast(int, self._prior)) + return int(cast("int", self._prior)) @property def prior_confidence(self) -> ConfidenceLevel: @@ -840,7 +867,7 @@ def prior_confidence(self) -> ConfidenceLevel: """ if not self.has_prior: raise ValueError("Domain has no prior and prior_confidence defined.") - return cast(ConfidenceLevel, self._prior_confidence) + return cast("ConfidenceLevel", self._prior_confidence) @property def range_compatibility_identifier(self) -> str: @@ -850,7 +877,7 @@ def range_compatibility_identifier(self) -> str: A string representation of the number of choices in the domain. """ - return f"{len(cast(tuple, self._choices))}" + return f"{len(cast('tuple', self._choices))}" def sample(self) -> int: """Sample a random index from the categorical choices. @@ -862,7 +889,7 @@ def sample(self) -> int: ValueError: If the choices are empty. 
""" - return int(random.randint(0, len(cast(tuple[T], self._choices)) - 1)) + return int(random.randint(0, len(cast("tuple[T]", self._choices)) - 1)) def centered_around( self, @@ -886,7 +913,7 @@ def centered_around( """ new_min, new_max = cast( - tuple[int, int], + "tuple[int, int]", _calculate_new_domain_bounds( number_type=int, lower=self.lower, @@ -895,10 +922,10 @@ def centered_around( confidence=confidence, ), ) - new_choices = cast(tuple, self._choices)[new_min : new_max + 1] + new_choices = cast("tuple", self._choices)[new_min : new_max + 1] return Categorical( choices=new_choices, - prior=new_choices.index(cast(tuple, self._choices)[center]), + prior=new_choices.index(cast("tuple", self._choices)[center]), prior_confidence=confidence, ) @@ -971,19 +998,8 @@ def __str__(self) -> str: return format_value(self) - def compare_domain_to(self, other: object) -> bool: - """Check if this float parameter is equivalent to another. - - This method provides comparison logic without interfering with Python's - object identity system (unlike __eq__). Use this for functional comparisons - like checking if parameters have the same configuration. - - Args: - other: The object to compare with. - - Returns: - True if the objects are equivalent, False otherwise. 
- """ + def compare_domain_to(self, other: object) -> bool: # noqa: D102 + # Docstring set dynamically below if not isinstance(other, Float): return False return ( @@ -1053,7 +1069,7 @@ def prior(self) -> float: """ if not self.has_prior: raise ValueError("Domain has no prior and prior_confidence defined.") - return float(cast(float, self._prior)) + return float(cast("float", self._prior)) @property def prior_confidence(self) -> ConfidenceLevel: @@ -1068,7 +1084,7 @@ def prior_confidence(self) -> ConfidenceLevel: """ if not self.has_prior: raise ValueError("Domain has no prior and prior_confidence defined.") - return cast(ConfidenceLevel, self._prior_confidence) + return cast("ConfidenceLevel", self._prior_confidence) @property def range_compatibility_identifier(self) -> str: @@ -1201,19 +1217,8 @@ def __str__(self) -> str: return format_value(self) - def compare_domain_to(self, other: object) -> bool: - """Check if this integer parameter is equivalent to another. - - This method provides comparison logic without interfering with Python's - object identity system (unlike __eq__). Use this for functional comparisons - like checking if parameters have the same configuration. - - Args: - other: The object to compare with. - - Returns: - True if the objects are equivalent, False otherwise. 
- """ + def compare_domain_to(self, other: object) -> bool: # noqa: D102 + # Docstring set dynamically below if not isinstance(other, Integer): return False return ( @@ -1283,7 +1288,7 @@ def prior(self) -> int: """ if not self.has_prior: raise ValueError("Domain has no prior and prior_confidence defined.") - return int(cast(int, self._prior)) + return int(cast("int", self._prior)) @property def prior_confidence(self) -> ConfidenceLevel: @@ -1298,7 +1303,7 @@ def prior_confidence(self) -> ConfidenceLevel: """ if not self.has_prior: raise ValueError("Domain has no prior and prior_confidence defined.") - return cast(ConfidenceLevel, self._prior_confidence) + return cast("ConfidenceLevel", self._prior_confidence) @property def range_compatibility_identifier(self) -> str: @@ -1345,7 +1350,7 @@ def centered_around( """ new_min, new_max = cast( - tuple[int, int], + "tuple[int, int]", _calculate_new_domain_bounds( number_type=int, lower=self.lower, @@ -1407,19 +1412,12 @@ def __str__(self) -> str: return format_value(self) - def compare_domain_to(self, other: object) -> bool: - """Check if this operation parameter is equivalent to another. + def resample(self) -> Resampled: # noqa: D102 + # Docstring set dynamically below + return Resampled(self) - This method provides comparison logic without interfering with Python's - object identity system (unlike __eq__). Use this for functional comparisons - like checking if parameters have the same configuration. - - Args: - other: The object to compare with. - - Returns: - True if the objects are equivalent, False otherwise. - """ + def compare_domain_to(self, other: object) -> bool: # noqa: D102 + # Docstring set dynamically below if not isinstance(other, Operation): return False return ( @@ -1595,19 +1593,8 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: ) return self._source.from_attrs(attrs) - def compare_domain_to(self, other: object) -> bool: - """Check if this resampled parameter is equivalent to another. 
- - This method provides comparison logic without interfering with Python's - object identity system (unlike __eq__). Use this for functional comparisons - like checking if parameters have the same configuration. - - Args: - other: The object to compare with. - - Returns: - True if the objects are equivalent, False otherwise. - """ + def compare_domain_to(self, other: object) -> bool: # noqa: D102 + # Docstring set dynamically below if not isinstance(other, Resampled): return False return self.source == other.source @@ -1657,6 +1644,10 @@ def content(self) -> Resolvable | Any: """ return self._content + def resample(self) -> Resampled: # noqa: D102 + # Docstring set dynamically below + return Resampled(self) + def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the resolvable as a mapping. @@ -1708,6 +1699,10 @@ def content(self) -> Resolvable | tuple[Any] | str: """ return self._content + def resample(self) -> Resampled: # noqa: D102 + # Docstring set dynamically below + return Resampled(self) + def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the lazy resolvable as a mapping. @@ -1743,3 +1738,37 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: # noqa: ARG002 # resolvable objects that in general will interact with them # through these two methods. # When they raise, then the traversal will not be possible. 
+ + +# Set docstrings dynamically to maintain DRY principle +# This avoids repeating identical documentation across multiple classes +Operation.resample.__doc__ = _RESAMPLE_DOCSTRING.format( + type_name="operation", + description="operation", + type_lower="operation", +) +Repeated.resample.__doc__ = _RESAMPLE_DOCSTRING.format( + type_name="repeated structure", + description="repeated structure", + type_lower="repeated", +) +Lazy.resample.__doc__ = _RESAMPLE_DOCSTRING.format( + type_name="lazy value", + description="lazy value", + type_lower="lazy", +) + +Fidelity.compare_domain_to.__doc__ = _COMPARE_DOMAIN_DOCSTRING.format( + type_name="fidelity" +) +Categorical.compare_domain_to.__doc__ = _COMPARE_DOMAIN_DOCSTRING.format( + type_name="categorical" +) +Float.compare_domain_to.__doc__ = _COMPARE_DOMAIN_DOCSTRING.format(type_name="float") +Integer.compare_domain_to.__doc__ = _COMPARE_DOMAIN_DOCSTRING.format(type_name="integer") +Operation.compare_domain_to.__doc__ = _COMPARE_DOMAIN_DOCSTRING.format( + type_name="operation" +) +Resampled.compare_domain_to.__doc__ = _COMPARE_DOMAIN_DOCSTRING.format( + type_name="resampled" +) diff --git a/neps/space/neps_spaces/string_formatter.py b/neps/space/neps_spaces/string_formatter.py index 158a7eb9c..b327bea7c 100644 --- a/neps/space/neps_spaces/string_formatter.py +++ b/neps/space/neps_spaces/string_formatter.py @@ -129,6 +129,22 @@ def format_value( # noqa: C901, PLR0911 # ============================================================================ +def _format_prior_confidence(prior_confidence: Any) -> str: + """Internal helper to format prior_confidence values consistently. 
+ + Args: + prior_confidence: The prior confidence value (typically a ConfidenceLevel enum) + + Returns: + String representation of the prior confidence + """ + return ( + prior_confidence.value + if hasattr(prior_confidence, "value") + else str(prior_confidence) + ) + + def _format_categorical( categorical: Categorical, indent: int, @@ -153,11 +169,7 @@ def _format_categorical( ) if categorical.has_prior: - prior_confidence_str = ( - categorical._prior_confidence.value - if hasattr(categorical._prior_confidence, "value") - else str(categorical._prior_confidence) - ) + prior_confidence_str = _format_prior_confidence(categorical._prior_confidence) result += ( f",\n{inner_indent_str}prior={categorical._prior}," f"\n{inner_indent_str}prior_confidence={prior_confidence_str}" @@ -177,11 +189,7 @@ def _format_float( if float_param._log: string += ", log" if float_param.has_prior: - prior_confidence_str = ( - float_param._prior_confidence.value - if hasattr(float_param._prior_confidence, "value") - else str(float_param._prior_confidence) - ) + prior_confidence_str = _format_prior_confidence(float_param._prior_confidence) string += f", prior={float_param._prior}, prior_confidence={prior_confidence_str}" string += ")" return string @@ -197,11 +205,7 @@ def _format_integer( if integer_param._log: string += ", log" if integer_param.has_prior: - prior_confidence_str = ( - integer_param._prior_confidence.value - if hasattr(integer_param._prior_confidence, "value") - else str(integer_param._prior_confidence) - ) + prior_confidence_str = _format_prior_confidence(integer_param._prior_confidence) string += ( f", prior={integer_param._prior}, prior_confidence={prior_confidence_str}" ) diff --git a/neps_examples/basic_usage/3_architecture_search.py b/neps_examples/basic_usage/3_architecture_search.py index c302628a4..5da92decb 100644 --- a/neps_examples/basic_usage/3_architecture_search.py +++ b/neps_examples/basic_usage/3_architecture_search.py @@ -45,7 +45,7 @@ class 
NN_Space(neps.PipelineSpace): kwargs={ "in_channels": 3, "out_channels": 3, - "kernel_size": neps.Resampled(_kernel_size), + "kernel_size": _kernel_size.resample(), "padding": "same", }, ) @@ -63,8 +63,8 @@ class NN_Space(neps.PipelineSpace): _cell = neps.Operation( operator=nn.Sequential, args=( - neps.Resampled(_conv), - neps.Resampled(_nonlinearity), + _conv.resample(), + _nonlinearity.resample(), ), ) @@ -73,9 +73,9 @@ class NN_Space(neps.PipelineSpace): model = neps.Operation( operator=nn.Sequential, args=( - neps.Resampled(_cell), - neps.Resampled(_cell), - neps.Resampled(_cell), + _cell.resample(), + _cell.resample(), + _cell.resample(), ), ) diff --git a/neps_examples/basic_usage/4_architecture_and_hyperparameters.py b/neps_examples/basic_usage/4_architecture_and_hyperparameters.py index 446c5a391..6ab6896f1 100644 --- a/neps_examples/basic_usage/4_architecture_and_hyperparameters.py +++ b/neps_examples/basic_usage/4_architecture_and_hyperparameters.py @@ -47,7 +47,7 @@ class NN_Space(neps.PipelineSpace): kwargs={ "in_channels": 3, "out_channels": 3, - "kernel_size": neps.Resampled(_kernel_size), + "kernel_size": _kernel_size.resample(), "padding": "same", }, ) @@ -65,8 +65,8 @@ class NN_Space(neps.PipelineSpace): _cell = neps.Operation( operator=nn.Sequential, args=( - neps.Resampled(_conv), - neps.Resampled(_nonlinearity), + _conv.resample(), + _nonlinearity.resample(), ), ) @@ -75,9 +75,9 @@ class NN_Space(neps.PipelineSpace): model = neps.Operation( operator=nn.Sequential, args=( - neps.Resampled(_cell), - neps.Resampled(_cell), - neps.Resampled(_cell), + _cell.resample(), + _cell.resample(), + _cell.resample(), ), ) diff --git a/neps_examples/basic_usage/5_optimizer_search.py b/neps_examples/basic_usage/5_optimizer_search.py index 8ea3f70e6..dcebea9f1 100644 --- a/neps_examples/basic_usage/5_optimizer_search.py +++ b/neps_examples/basic_usage/5_optimizer_search.py @@ -65,13 +65,13 @@ class OptimizerSpace(neps.PipelineSpace): optimizer_class = 
neps.Operation( operator=optimizer_constructor, args=( - neps.Resampled(_functions), - neps.Resampled(_functions), - neps.Resampled(_functions), + _functions.resample(), + _functions.resample(), + _functions.resample(), ), kwargs={ - "learning_rate": neps.Resampled(_learning_rate), - "gradient_clipping": neps.Resampled(_gradient_clipping), + "learning_rate": _learning_rate.resample(), + "gradient_clipping": _gradient_clipping.resample(), }, ) diff --git a/neps_examples/convenience/create_and_import_custom_config.py b/neps_examples/convenience/create_and_import_custom_config.py index ef8031af0..467162f5a 100644 --- a/neps_examples/convenience/create_and_import_custom_config.py +++ b/neps_examples/convenience/create_and_import_custom_config.py @@ -17,8 +17,8 @@ class ExampleSpace(neps.PipelineSpace): "option2", neps.Operation( operator="option3", - args=(float1, neps.Resampled(cat1)), - kwargs={"param1": neps.Resampled(float1)}, + args=(float1, cat1.resample()), + kwargs={"param1": float1.resample()}, ), ] ) @@ -31,7 +31,6 @@ class ExampleSpace(neps.PipelineSpace): print("Created configuration:") pprint(config) - logging.basicConfig(level=logging.INFO) # The created configuration can then be used as an imported trial in NePS optimizers. 
# We demonstrate this with the fictional result of objective_to_minimize = 0.5 diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index d467a67c4..f2bc8334a 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -21,7 +21,6 @@ Integer, Operation, PipelineSpace, - Resampled, ) @@ -124,16 +123,16 @@ class DemoHyperparameterComplexSpace(PipelineSpace): float1 = Categorical( choices=( - Resampled(_small_float), - Resampled(_big_float), + _small_float.resample(), + _big_float.resample(), ), prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) float2 = Categorical( choices=( - Resampled(_small_float), - Resampled(_big_float), + _small_float.resample(), + _big_float.resample(), float1, ), prior=0, @@ -304,7 +303,7 @@ class DemoOperationSpace(PipelineSpace): # `MultipliedSum(factor=0.2)` _multiplied_sum = Operation( operator=MultipliedSum, - kwargs={"factor": Resampled(_factor)}, + kwargs={"factor": _factor.resample()}, ) # Model @@ -317,7 +316,7 @@ class DemoOperationSpace(PipelineSpace): model = Operation( operator=Model, args=(_inner_function,), - kwargs={"factor": Resampled(_factor)}, + kwargs={"factor": _factor.resample()}, ) # An additional hyperparameter @@ -508,7 +507,7 @@ class ComplexNepsSpace(PipelineSpace): # Operation with resampled parameters operation = Operation( operator=lambda x, y: x * y, - args=(factor, Resampled(factor)), + args=(factor, factor.resample()), ) # Categorical with operations as choices diff --git a/tests/test_neps_space/test_pipeline_space_methods.py b/tests/test_neps_space/test_pipeline_space_methods.py index a720f3e01..98155f04c 100644 --- a/tests/test_neps_space/test_pipeline_space_methods.py +++ b/tests/test_neps_space/test_pipeline_space_methods.py @@ -77,7 +77,7 @@ def test_add_method_different_types(): assert isinstance(space.get_attrs()["new_op"], Operation) # Add Resampled - resampled = 
Resampled(space.get_attrs()["x"]) + resampled = space.get_attrs()["x"].resample() space = space.add(resampled, "new_resampled") assert "new_resampled" in space.get_attrs() assert isinstance(space.get_attrs()["new_resampled"], Resampled) diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index 86cdd92a0..6b0f23cbd 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -22,12 +22,12 @@ class GrammarLike(PipelineSpace): _C0 = Operation( operator="Sequential", - args=(Resampled(_O),), + args=(_O.resample(),), ) _C1 = Operation( operator="Sequential", args=( - Resampled(_O), + _O.resample(), Resampled("S"), _reluconvbn, ), @@ -35,7 +35,7 @@ class GrammarLike(PipelineSpace): _C2 = Operation( operator="Sequential", args=( - Resampled(_O), + _O.resample(), Resampled("S"), ), ) @@ -45,16 +45,16 @@ class GrammarLike(PipelineSpace): ) _C = Categorical( choices=( - Resampled(_C0), - Resampled(_C1), - Resampled(_C2), - Resampled(_C3), + _C0.resample(), + _C1.resample(), + _C2.resample(), + _C3.resample(), ), ) _S0 = Operation( operator="Sequential", - args=(Resampled(_C),), + args=(_C.resample(),), ) _S1 = Operation( operator="Sequential", @@ -68,15 +68,15 @@ class GrammarLike(PipelineSpace): operator="Sequential", args=( Resampled("S"), - Resampled(_C), + _C.resample(), ), ) _S4 = Operation( operator="Sequential", args=( - Resampled(_O), - Resampled(_O), - Resampled(_O), + _O.resample(), + _O.resample(), + _O.resample(), ), ) _S5 = Operation( @@ -84,22 +84,22 @@ class GrammarLike(PipelineSpace): args=( Resampled("S"), Resampled("S"), - Resampled(_O), - Resampled(_O), - Resampled(_O), - Resampled(_O), - Resampled(_O), - Resampled(_O), + _O.resample(), + _O.resample(), + _O.resample(), + _O.resample(), + _O.resample(), + _O.resample(), ), ) S = Categorical( choices=( - Resampled(_S0), - Resampled(_S1), - Resampled(_S2), - 
Resampled(_S3), - Resampled(_S4), - Resampled(_S5), + _S0.resample(), + _S1.resample(), + _S2.resample(), + _S3.resample(), + _S4.resample(), + _S5.resample(), ), ) @@ -114,14 +114,14 @@ class GrammarLikeAlt(PipelineSpace): _C_ARGS = Categorical( choices=( - (Resampled(_O),), + (_O.resample(),), ( - Resampled(_O), + _O.resample(), Resampled("S"), _reluconvbn, ), ( - Resampled(_O), + _O.resample(), Resampled("S"), ), (Resampled("S"),), @@ -129,38 +129,38 @@ class GrammarLikeAlt(PipelineSpace): ) _C = Operation( operator="Sequential", - args=Resampled(_C_ARGS), + args=_C_ARGS.resample(), ) _S_ARGS = Categorical( choices=( - (Resampled(_C),), + (_C.resample(),), (_reluconvbn,), (Resampled("S"),), ( Resampled("S"), - Resampled(_C), + _C.resample(), ), ( - Resampled(_O), - Resampled(_O), - Resampled(_O), + _O.resample(), + _O.resample(), + _O.resample(), ), ( Resampled("S"), Resampled("S"), - Resampled(_O), - Resampled(_O), - Resampled(_O), - Resampled(_O), - Resampled(_O), - Resampled(_O), + _O.resample(), + _O.resample(), + _O.resample(), + _O.resample(), + _O.resample(), + _O.resample(), ), ), ) S = Operation( operator="Sequential", - args=Resampled(_S_ARGS), + args=_S_ARGS.resample(), ) diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index 07a5c14ba..17169d315 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -9,7 +9,6 @@ Float, Operation, PipelineSpace, - Resampled, ) @@ -61,31 +60,31 @@ class HNASLikePipeline(PipelineSpace): args=( _ACT, _CONV, - Resampled(_NORM), + _NORM.resample(), ), ) _CONVBLOCK_FULL = Operation( operator="OPS Sequential1", - args=(Resampled(_CONVBLOCK),), + args=(_CONVBLOCK.resample(),), ) _OP = Categorical( choices=( Operation(operator="OPS zero"), Operation(operator="OPS id"), Operation(operator="OPS avg_pool"), - Resampled(_CONVBLOCK_FULL), + _CONVBLOCK_FULL.resample(), ), ) CL = 
Operation( operator="CELL Cell", args=( - Resampled(_OP), - Resampled(_OP), - Resampled(_OP), - Resampled(_OP), - Resampled(_OP), - Resampled(_OP), + _OP.resample(), + _OP.resample(), + _OP.resample(), + _OP.resample(), + _OP.resample(), + _OP.resample(), ), ) @@ -111,25 +110,25 @@ class HNASLikePipeline(PipelineSpace): Operation( operator="D0 Sequential3", args=( - Resampled(_C), - Resampled(_C), + _C.resample(), + _C.resample(), CL, ), ), Operation( operator="D0 Sequential4", args=( - Resampled(_C), - Resampled(_C), - Resampled(_C), + _C.resample(), + _C.resample(), + _C.resample(), CL, ), ), Operation( operator="D0 Residual3", args=( - Resampled(_C), - Resampled(_C), + _C.resample(), + _C.resample(), CL, CL, ), @@ -141,27 +140,27 @@ class HNASLikePipeline(PipelineSpace): Operation( operator="D1 Sequential3", args=( - Resampled(_C), - Resampled(_C), - Resampled(_DOWN), + _C.resample(), + _C.resample(), + _DOWN.resample(), ), ), Operation( operator="D1 Sequential4", args=( - Resampled(_C), - Resampled(_C), - Resampled(_C), - Resampled(_DOWN), + _C.resample(), + _C.resample(), + _C.resample(), + _DOWN.resample(), ), ), Operation( operator="D1 Residual3", args=( - Resampled(_C), - Resampled(_C), - Resampled(_DOWN), - Resampled(_DOWN), + _C.resample(), + _C.resample(), + _DOWN.resample(), + _DOWN.resample(), ), ), ), @@ -172,26 +171,26 @@ class HNASLikePipeline(PipelineSpace): Operation( operator="D2 Sequential3", args=( - Resampled(_D1), - Resampled(_D1), - Resampled(_D0), + _D1.resample(), + _D1.resample(), + _D0.resample(), ), ), Operation( operator="D2 Sequential3", args=( - Resampled(_D0), - Resampled(_D1), - Resampled(_D1), + _D0.resample(), + _D1.resample(), + _D1.resample(), ), ), Operation( operator="D2 Sequential4", args=( - Resampled(_D1), - Resampled(_D1), - Resampled(_D0), - Resampled(_D0), + _D1.resample(), + _D1.resample(), + _D0.resample(), + _D0.resample(), ), ), ), diff --git a/tests/test_neps_space/test_search_space__nos_like.py 
b/tests/test_neps_space/test_search_space__nos_like.py index 9dd784e94..55bde46aa 100644 --- a/tests/test_neps_space/test_search_space__nos_like.py +++ b/tests/test_neps_space/test_search_space__nos_like.py @@ -48,58 +48,58 @@ class NosBench(PipelineSpace): _POINTER = Categorical( choices=( - Resampled(_PARAMS), - Resampled(_CONST), - Resampled(_VAR), + _PARAMS.resample(), + _CONST.resample(), + _VAR.resample(), ), ) _UNARY = Operation( operator="Unary", args=( - Resampled(_UNARY_FUN), - Resampled(_POINTER), + _UNARY_FUN.resample(), + _POINTER.resample(), ), ) _BINARY = Operation( operator="Binary", args=( - Resampled(_BINARY_FUN), - Resampled(_POINTER), - Resampled(_POINTER), + _BINARY_FUN.resample(), + _POINTER.resample(), + _POINTER.resample(), ), ) _TERNARY = Operation( operator="Ternary", args=( - Resampled(_TERNARY_FUN), - Resampled(_POINTER), - Resampled(_POINTER), - Resampled(_POINTER), + _TERNARY_FUN.resample(), + _POINTER.resample(), + _POINTER.resample(), + _POINTER.resample(), ), ) _F_ARGS = Categorical( choices=( - Resampled(_UNARY), - Resampled(_BINARY), - Resampled(_TERNARY), + _UNARY.resample(), + _BINARY.resample(), + _TERNARY.resample(), ), ) _F = Operation( operator="Function", - args=(Resampled(_F_ARGS),), - kwargs={"var": Resampled(_VAR)}, + args=(_F_ARGS.resample(),), + kwargs={"var": _VAR.resample()}, ) _L_ARGS = Categorical( choices=( - (Resampled(_F),), + (_F.resample(),), ( - Resampled(_F), + _F.resample(), Resampled("_L"), ), ), @@ -107,12 +107,12 @@ class NosBench(PipelineSpace): _L = Operation( operator="Line_operator", - args=Resampled(_L_ARGS), + args=_L_ARGS.resample(), ) P = Operation( operator="Program", - args=(Resampled(_L),), + args=(_L.resample(),), ) diff --git a/tests/test_neps_space/test_search_space__recursion.py b/tests/test_neps_space/test_search_space__recursion.py index 4f65b1e01..c223022aa 100644 --- a/tests/test_neps_space/test_search_space__recursion.py +++ b/tests/test_neps_space/test_search_space__recursion.py @@ 
-58,7 +58,7 @@ class DemoRecursiveOperationSpace(PipelineSpace): ) model = Operation( operator=Model, - args=(Resampled(_inner_function),), + args=(_inner_function.resample(),), kwargs={"factor": _factor}, ) diff --git a/tests/test_neps_space/test_search_space__resampled.py b/tests/test_neps_space/test_search_space__resampled.py index 4fba5a874..e5b4a5791 100644 --- a/tests/test_neps_space/test_search_space__resampled.py +++ b/tests/test_neps_space/test_search_space__resampled.py @@ -10,7 +10,6 @@ Integer, Operation, PipelineSpace, - Resampled, ) @@ -34,14 +33,14 @@ class ActPipelineSimpleFloat(PipelineSpace): prelu_own_clone1 = Operation( operator="prelu", - kwargs={"init": Resampled(prelu_init_value)}, + kwargs={"init": prelu_init_value.resample()}, ) prelu_own_clone2 = Operation( operator="prelu", - kwargs={"init": Resampled(prelu_init_value)}, + kwargs={"init": prelu_init_value.resample()}, ) - _prelu_init_resampled = Resampled(prelu_init_value) + _prelu_init_resampled = prelu_init_value.resample() prelu_common_clone1 = Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, @@ -66,14 +65,14 @@ class ActPipelineComplexInteger(PipelineSpace): prelu_own_clone1 = Operation( operator="prelu", - kwargs={"init": Resampled(prelu_init_value)}, + kwargs={"init": prelu_init_value.resample()}, ) prelu_own_clone2 = Operation( operator="prelu", - kwargs={"init": Resampled(prelu_init_value)}, + kwargs={"init": prelu_init_value.resample()}, ) - _prelu_init_resampled = Resampled(prelu_init_value) + _prelu_init_resampled = prelu_init_value.resample() prelu_common_clone1 = Operation( operator="prelu", kwargs={"init": _prelu_init_resampled}, @@ -97,7 +96,7 @@ class ActPipelineComplexInteger(PipelineSpace): "prelu_shared": prelu_shared1, "prelu_own_clone": prelu_own_clone1, "prelu_common_clone": prelu_common_clone1, - "resampled_hp_value": Resampled(prelu_init_value), + "resampled_hp_value": prelu_init_value.resample(), }, ) @@ -118,21 +117,21 @@ class 
CellPipelineCategorical(PipelineSpace): ) op2 = Categorical( choices=( - Resampled(conv_block), + conv_block.resample(), Operation("op2"), ), ) - _resampled_op1 = Resampled(op1) + _resampled_op1 = op1.resample() cell = Operation( operator="cell", args=( op1, op2, _resampled_op1, - Resampled(op2), + op2.resample(), _resampled_op1, - Resampled(op2), + op2.resample(), ), ) diff --git a/tests/test_neps_space/test_space_conversion_and_compatibility.py b/tests/test_neps_space/test_space_conversion_and_compatibility.py index 1381db389..dceb96b55 100644 --- a/tests/test_neps_space/test_space_conversion_and_compatibility.py +++ b/tests/test_neps_space/test_space_conversion_and_compatibility.py @@ -19,7 +19,6 @@ Integer, Operation, PipelineSpace, - Resampled, ) @@ -52,7 +51,7 @@ class ComplexNepsSpace(PipelineSpace): # Operation with resampled parameters operation = Operation( operator=lambda x, y: x * y, - args=(factor, Resampled(factor)), + args=(factor, factor.resample()), ) # Categorical with operations as choices diff --git a/tests/test_neps_space/test_string_formatter.py b/tests/test_neps_space/test_string_formatter.py index e022a5500..30733d3d6 100644 --- a/tests/test_neps_space/test_string_formatter.py +++ b/tests/test_neps_space/test_string_formatter.py @@ -30,9 +30,9 @@ def test_operation_with_args_only(): op = Operation(operator="Add", args=(1, 2, 3)) result = operation_to_string(op) expected = """Add( - 1, - 2, - 3, + 1, + 2, + 3, )""" assert result == expected @@ -42,8 +42,8 @@ def test_operation_with_kwargs_only(): op = Operation(operator="Conv2d", kwargs={"in_channels": 3, "out_channels": 64}) result = operation_to_string(op) expected = """Conv2d( - in_channels=3, - out_channels=64, + in_channels=3, + out_channels=64, )""" assert result == expected @@ -57,9 +57,9 @@ def test_operation_with_args_and_kwargs(): ) result = operation_to_string(op) expected = """LinearLayer( - 128, - activation=relu, - dropout=0.5, + 128, + activation=relu, + dropout=0.5, )""" 
assert result == expected @@ -70,7 +70,7 @@ def test_nested_operations(): outer = Operation(operator="Sequential", args=(inner,)) result = operation_to_string(outer) expected = """Sequential( - ReLU(), + ReLU(), )""" assert result == expected @@ -88,15 +88,15 @@ def test_deeply_nested_operations(): result = operation_to_string(sequential) expected = """Sequential( - Conv2d( - in_channels=3, - out_channels=64, - kernel_size=3, - ), - ReLU(), - MaxPool2d( - kernel_size=2, - ), + Conv2d( + in_channels=3, + out_channels=64, + kernel_size=3, + ), + ReLU(), + MaxPool2d( + kernel_size=2, + ), )""" assert result == expected @@ -106,7 +106,7 @@ def test_list_as_arg(): op = Operation(operator="Conv2d", kwargs={"kernel_size": [3, 3]}) result = operation_to_string(op) expected = """Conv2d( - kernel_size=[3, 3], + kernel_size=[3, 3], )""" assert result == expected @@ -128,7 +128,7 @@ def test_tuple_as_arg(): op = Operation(operator="Shape", args=((64, 64, 3),)) result = operation_to_string(op) expected = """Shape( - (64, 64, 3), + (64, 64, 3), )""" assert result == expected @@ -142,10 +142,10 @@ def test_dict_as_kwarg(): result = operation_to_string(op) # Dict gets expanded due to length expected = """ConfigOp( - config={ - 'learning_rate': 0.001, - 'batch_size': 32, - }, + config={ + 'learning_rate': 0.001, + 'batch_size': 32, + }, )""" assert result == expected @@ -159,14 +159,14 @@ def test_operations_in_list(): result = operation_to_string(container) expected = """ModuleList( - [ - Conv2d( - channels=32, - ), - Conv2d( - channels=64, - ), - ], + [ + Conv2d( + channels=32, + ), + Conv2d( + channels=64, + ), + ], )""" assert result == expected @@ -180,10 +180,10 @@ def test_operations_in_list_as_kwarg(): result = operation_to_string(container) expected = """Container( - layers=[ - ReLU(), - Sigmoid(), - ], + layers=[ + ReLU(), + Sigmoid(), + ], )""" assert result == expected @@ -282,7 +282,7 @@ def test_empty_list(): op = Operation(operator="Op", kwargs={"items": []}) result 
= operation_to_string(op) expected = """Op( - items=[], + items=[], )""" assert result == expected @@ -292,7 +292,7 @@ def test_empty_tuple(): op = Operation(operator="Op", args=((),)) result = operation_to_string(op) expected = """Op( - (), + (), )""" assert result == expected @@ -302,7 +302,7 @@ def test_empty_dict(): op = Operation(operator="Op", kwargs={"config": {}}) result = operation_to_string(op) expected = """Op( - config={}, + config={}, )""" assert result == expected @@ -312,9 +312,9 @@ def test_boolean_values(): op = Operation(operator="Op", kwargs={"enabled": True, "debug": False, "count": 0}) result = operation_to_string(op) expected = """Op( - enabled=True, - debug=False, - count=0, + enabled=True, + debug=False, + count=0, )""" assert result == expected @@ -324,7 +324,7 @@ def test_none_value(): op = Operation(operator="Op", kwargs={"default": None}) result = operation_to_string(op) expected = """Op( - default=None, + default=None, )""" assert result == expected From d7009fb2ac13141e5c93e35872ce15e89f7ed329 Mon Sep 17 00:00:00 2001 From: Meganton Date: Fri, 28 Nov 2025 22:50:51 +0100 Subject: [PATCH 136/156] Refactor NePS space parameters and update string formatting - Replaced instances of `Fidelity` with `IntegerFidelity` in various files to standardize fidelity parameter usage. - Updated the string formatting function calls from `operation_to_string` to `format_value` across multiple test files for consistency. - Cleaned up code formatting and improved readability by adding or adjusting whitespace in several locations. - Enhanced the `ask_and_tell_example.py` and `freeze_thaw.py` scripts to reflect the new fidelity parameter structure. - Adjusted test cases to ensure compatibility with the updated parameter types and formatting functions. 
--- docs/getting_started.md | 2 +- docs/reference/neps_run.md | 2 +- docs/reference/neps_spaces.md | 25 ++-- neps/__init__.py | 11 +- neps/api.py | 10 +- neps/space/neps_spaces/neps_space.py | 49 ++----- neps/space/neps_spaces/parameters.py | 124 ++++++++++++++---- neps/space/neps_spaces/string_formatter.py | 42 +++--- neps/utils/__init__.py | 6 +- .../create_and_import_custom_config.py | 2 +- neps_examples/convenience/import_trials.py | 2 +- neps_examples/efficiency/multi_fidelity.py | 2 +- .../multi_fidelity_and_expert_priors.py | 2 +- .../efficiency/pytorch_lightning_ddp.py | 2 +- .../efficiency/pytorch_lightning_fsdp.py | 2 +- .../efficiency/pytorch_native_fsdp.py | 2 +- .../experimental/ask_and_tell_example.py | 32 +++-- neps_examples/experimental/freeze_thaw.py | 2 +- .../test_neps_space/test_neps_integration.py | 13 +- ...st_neps_integration_priorband__max_cost.py | 10 +- ...t_neps_integration_priorband__max_evals.py | 10 +- .../test_pipeline_space_methods.py | 10 +- .../test_search_space__fidelity.py | 9 +- .../test_search_space__grammar_like.py | 42 +++--- .../test_search_space__hnas_like.py | 14 +- .../test_search_space__nos_like.py | 8 +- .../test_search_space__recursion.py | 4 +- .../test_search_space__reuse_arch_elements.py | 22 ++-- ...test_space_conversion_and_compatibility.py | 5 +- .../test_neps_space/test_string_formatter.py | 66 +++++----- tests/test_neps_space/utils.py | 24 ---- .../test_trajectory_and_metrics.py | 4 +- tests/test_state/test_neps_state.py | 6 +- 33 files changed, 293 insertions(+), 273 deletions(-) delete mode 100644 tests/test_neps_space/utils.py diff --git a/docs/getting_started.md b/docs/getting_started.md index 91bacec44..5cbcdf9da 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -21,7 +21,7 @@ class ExampleSpace(neps.PipelineSpace): some_parameter = neps.Float(lower=0.0, upper=1.0) # float another_parameter = neps.Integer(lower=0, upper=10) # integer optimizer = neps.Categorical(choices=("sgd", "adam")) 
# categorical - epoch = neps.Fidelity(neps.Integer(lower=1, upper=100)) + epoch = neps.IntegerFidelity(lower=1, upper=100) learning_rate = neps.Float(lower=1e-5, upper=1, log=True) alpha = neps.Float(lower=0.1, upper=1.0, prior=0.99, prior_confidence="high") ``` diff --git a/docs/reference/neps_run.md b/docs/reference/neps_run.md index 9b725a955..a7f419d34 100644 --- a/docs/reference/neps_run.md +++ b/docs/reference/neps_run.md @@ -21,7 +21,7 @@ def evaluate_pipeline(learning_rate: float, epochs: int) -> float: class ExamplePipeline(neps.PipelineSpace): learning_rate = neps.Float(1e-3, 1e-1, log=True) - epochs = neps.Fidelity(neps.Integer(10, 100)) + epochs = neps.IntegerFidelity(10, 100) neps.run( evaluate_pipeline=evaluate_pipeline, # (1)! diff --git a/docs/reference/neps_spaces.md b/docs/reference/neps_spaces.md index b89600a2c..fd5b227de 100644 --- a/docs/reference/neps_spaces.md +++ b/docs/reference/neps_spaces.md @@ -9,7 +9,9 @@ - [`neps.Integer`][neps.space.neps_spaces.parameters.Integer]: Discrete integer values - [`neps.Float`][neps.space.neps_spaces.parameters.Float]: Continuous float values - [`neps.Categorical`][neps.space.neps_spaces.parameters.Categorical]: Discrete categorical values -- [`neps.Fidelity`][neps.space.neps_spaces.parameters.Fidelity]: Special type for float or integer, [multi-fidelity](../reference/search_algorithms/multifidelity.md) parameters (e.g., epochs, dataset size) +- [`neps.IntegerFidelity`][neps.space.neps_spaces.parameters.IntegerFidelity]: Integer [multi-fidelity](../reference/search_algorithms/multifidelity.md) parameters (e.g., epochs, batch size) +- [`neps.FloatFidelity`][neps.space.neps_spaces.parameters.FloatFidelity]: Float [multi-fidelity](../reference/search_algorithms/multifidelity.md) parameters (e.g., dataset subset ratio) +- [`neps.Fidelity`][neps.space.neps_spaces.parameters.Fidelity]: Generic fidelity type (use IntegerFidelity or FloatFidelity instead) Using these types, you can define the parameters that NePS 
will optimize during the search process. A **NePS space** is defined as a subclass of [`PipelineSpace`][neps.space.neps_spaces.parameters.PipelineSpace]. Here we define the hyperparameters that make up the space, like so: @@ -38,10 +40,15 @@ class MySpace(neps.PipelineSpace): ### Using cheap approximation, providing a [**Fidelity**](../reference/search_algorithms/landing_page_algo.md#what-is-multi-fidelity-optimization) Parameter -Passing a [`neps.Integer`][neps.space.neps_spaces.parameters.Integer] or [`neps.Float`][neps.space.neps_spaces.parameters.Float] to a [`neps.Fidelity`][neps.space.neps_spaces.parameters.Fidelity] allows you to employ multi-fidelity optimization strategies, which can significantly speed up the optimization process by evaluating configurations at different fidelities (e.g., training for fewer epochs): +You can use [`neps.IntegerFidelity`][neps.space.neps_spaces.parameters.IntegerFidelity] or [`neps.FloatFidelity`][neps.space.neps_spaces.parameters.FloatFidelity] to employ multi-fidelity optimization strategies, which can significantly speed up the optimization process by evaluating configurations at different fidelities (e.g., training for fewer epochs): ```python -epochs = neps.Fidelity(neps.Integer(1, 16)) +# Convenient syntax (recommended) +epochs = neps.IntegerFidelity(lower=1, upper=16) +subset_ratio = neps.FloatFidelity(lower=0.1, upper=1.0) + +# Alternative syntax (also works) +epochs = neps.IntegerFidelity(1, 16) ``` For more details on how to use fidelity parameters, see the [Multi-Fidelity](../reference/search_algorithms/landing_page_algo.md#what-is-multi-fidelity-optimization) section. @@ -148,12 +155,12 @@ def evaluate_pipeline( Until now all parameters are sampled once and their value used for all occurrences. 
This section describes how to resample parameters in different contexts using: -- [`.resample()`][neps.space.neps_spaces.parameters.Resampled]: Resample from an existing parameters range +- [`.resample()`][neps.space.neps_spaces.parameters.Resample]: Resample from an existing parameters range With `.resample()` you can reuse a parameter, even themselves recursively, but with a new value each time: ```python -class ResampledSpace(neps.PipelineSpace): +class ResampleSpace(neps.PipelineSpace): float_param = neps.Float(lower=0, upper=1) # The resampled parameter will have the same range but will be sampled @@ -190,15 +197,15 @@ def evaluate_pipeline(cnn: torch.nn.Module): ??? info "Self- and future references" - When referencing itself or a not yet defined parameter (to enable recursions) use a string of that parameters name with `neps.Resampled("parameter_name")`, like so: + When referencing itself or a not yet defined parameter (to enable recursions) use a string of that parameters name with `neps.Resample("parameter_name")`, like so: ```python self_reference = Categorical( choices=( # It will either choose to resample itself twice - (neps.Resampled("self_reference"), neps.Resampled("self_reference")), + (neps.Resample("self_reference"), neps.Resample("self_reference")), # Or it will sample the future parameter - (neps.Resampled("future_param"),), + (neps.Resample("future_param"),), ) ) # This results in a (possibly infinite) tuple of independently sampled future_params @@ -208,7 +215,7 @@ def evaluate_pipeline(cnn: torch.nn.Module): !!! tip "Complex structural spaces" - Together, [Resampling][neps.space.neps_spaces.parameters.Resampled] and [operations][neps.space.neps_spaces.parameters.Operation] allow you to define complex search spaces across the whole ML-pipeline akin to [Context-Free Grammars (CFGs)](https://en.wikipedia.org/wiki/Context-free_grammar), exceeding architecture search. 
For example, you can sample neural optimizers from a set of instructions, as done in [`NOSBench`](https://openreview.net/pdf?id=5Lm2ghxMlp) to train models. + Together, [Resampling][neps.space.neps_spaces.parameters.Resample] and [operations][neps.space.neps_spaces.parameters.Operation] allow you to define complex search spaces across the whole ML-pipeline akin to [Context-Free Grammars (CFGs)](https://en.wikipedia.org/wiki/Context-free_grammar), exceeding architecture search. For example, you can sample neural optimizers from a set of instructions, as done in [`NOSBench`](https://openreview.net/pdf?id=5Lm2ghxMlp) to train models. ## Inspecting Configurations diff --git a/neps/__init__.py b/neps/__init__.py index 583ca5b77..5d6956acb 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -25,15 +25,17 @@ ConfidenceLevel, Fidelity, Float, + FloatFidelity, Integer, + IntegerFidelity, Operation, PipelineSpace, - Resampled, + Resample, ) from neps.state import BudgetInfo, Trial from neps.state.pipeline_eval import UserResultDict from neps.status.status import status -from neps.utils import convert_operation_to_callable, convert_operation_to_string +from neps.utils import convert_operation_to_callable from neps.utils.files import load_and_merge_yamls __all__ = [ @@ -43,21 +45,22 @@ "ConfidenceLevel", "Fidelity", "Float", + "FloatFidelity", "HPOCategorical", "HPOConstant", "HPOFloat", "HPOInteger", "Integer", + "IntegerFidelity", "Operation", "PipelineSpace", - "Resampled", + "Resample", "SampledConfig", "SearchSpace", "Trial", "UserResultDict", "algorithms", "convert_operation_to_callable", - "convert_operation_to_string", "create_config", "import_trials", "load_and_merge_yamls", diff --git a/neps/api.py b/neps/api.py index bbea06637..6d1aefcca 100644 --- a/neps/api.py +++ b/neps/api.py @@ -96,9 +96,8 @@ class MySpace(PipelineSpace): learning_rate = neps.Float( # log spaced float lower=1e-5, upper=1, log=True ) - epochs = neps.Fidelity( # fidelity integer - 
neps.Integer(1, 100) - ) + epochs = # fidelity integer + neps.IntegerFidelity(1, 100) batch_size = neps.Integer( # integer with a prior lower=32, upper=512, @@ -160,9 +159,8 @@ class MySpace(PipelineSpace): learning_rate = neps.Float( # log spaced float lower=1e-5, upper=1, log=True ) - epochs = neps.Fidelity( # fidelity integer - neps.Integer(1, 100) - ) + epochs = # fidelity integer + neps.IntegerFidelity(1, 100) batch_size = neps.Integer( # integer with a prior lower=32, upper=512, diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index 91c3bbcc4..d02018641 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -24,7 +24,7 @@ Operation, PipelineSpace, Repeated, - Resampled, + Resample, Resolvable, ) from neps.space.neps_spaces.sampling import ( @@ -211,7 +211,7 @@ def resolving(self, _obj: Any, name: str) -> Generator[None]: # It is possible that the received object has already been resolved. # That is expected and is okay, so no check is made for it. - # For example, in the case of a Resampled we can receive the same object again. + # For example, in the case of a Resample we can receive the same object again. self._current_path_parts.append(name) try: @@ -239,7 +239,7 @@ def add_resolved(self, original: Any, resolved: Any) -> None: Raises: ValueError: If the original object was already resolved or if it is a - Resampled. + Resample. """ if self.was_already_resolved(original): raise ValueError( @@ -248,9 +248,9 @@ def add_resolved(self, original: Any, resolved: Any) -> None: + "make sure you are not forgetting to request resampling also for" " related objects." + "\nOtherwise it could lead to infinite recursion." ) - if isinstance(original, Resampled): + if isinstance(original, Resample): raise ValueError( - f"Attempting to add a Resampled object to resolved values: {original!r}." + f"Attempting to add a Resample object to resolved values: {original!r}." 
) self._resolved_objects[original] = resolved @@ -506,7 +506,7 @@ def _( # Otherwise, we already have the final answer for it. resolved_attr_value = context.get_resolved(initial_attr_value) elif isinstance(initial_attr_value, Categorical) or ( - isinstance(initial_attr_value, Resampled) + isinstance(initial_attr_value, Resample) and isinstance(initial_attr_value.source, Categorical) ): # We have a previously unseen provider. @@ -527,7 +527,7 @@ def _( resolved_attr_value = self._resolve( choice_provider_adjusted, "choice_provider", context ) - if not isinstance(initial_attr_value, Resampled): + if not isinstance(initial_attr_value, Resample): # It's important that we handle filling the context here, # as we manually created a different object from the original. # In case the original categorical is used again, @@ -604,13 +604,13 @@ def _( @_resolver_dispatch.register def _( self, - resampled_obj: Resampled, + resampled_obj: Resample, context: SamplingResolutionContext, ) -> Any: - # The results of Resampled are never stored or looked up from cache + # The results of Resample are never stored or looked up from cache # since it would break the logic of their expected behavior. - # Particularly, when Resampled objects are nested (at any depth) inside of - # other Resampled objects, adding them to the resolution context would result + # Particularly, when Resample objects are nested (at any depth) inside of + # other Resample objects, adding them to the resolution context would result # in the resolution not doing the right thing. if resampled_obj.is_resampling_by_name: @@ -929,27 +929,6 @@ def convert_operation_to_callable(operation: Operation) -> Callable: return cast(Callable, operator(*operation_args, **operation_kwargs)) -def convert_operation_to_string(operation: Operation | str | int | float) -> str: - """Convert an Operation to a string representation. - - Args: - operation: The Operation to convert, or a primitive value. 
- - Returns: - A string representation of the operation or value. - - Raises: - ValueError: If the operation is not a valid Operation object. - """ - from neps.space.neps_spaces.string_formatter import format_value - - # Handle non-Operation values (resolved primitives) - if not isinstance(operation, Operation): - return str(operation) - - return format_value(operation) - - # ------------------------------------------------- @@ -1123,7 +1102,7 @@ def inner(*args: Any, **kwargs: Any) -> Any: def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | None: """Convert a NePS space to a classic SearchSpace if possible. This function checks if the NePS space can be converted to a classic SearchSpace - by ensuring that it does not contain any complex types like Operation or Resampled, + by ensuring that it does not contain any complex types like Operation or Resample, and that all choices of Categorical parameters are of basic types (int, str, float). If the checks pass, it converts the NePS space to a classic SearchSpace. @@ -1133,9 +1112,9 @@ def convert_neps_to_classic_search_space(space: PipelineSpace) -> SearchSpace | Returns: A classic SearchSpace if the conversion is possible, otherwise None. """ - # First check: No parameters are of type Operation or Resampled + # First check: No parameters are of type Operation or Resample if not any( - isinstance(param, Operation | Resampled) for param in space.get_attrs().values() + isinstance(param, Operation | Resample) for param in space.get_attrs().values() ): # Second check: All choices of all categoricals are of basic # types i.e. int, str or float diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index e6ed7f133..8b522d5a2 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -25,18 +25,18 @@ T = TypeVar("T") # Shared docstring constants for DRY -_RESAMPLE_DOCSTRING = """Wrap this {type_name} in a Resampled container. 
+_RESAMPLE_DOCSTRING = """Wrap this {type_name} in a Resample container. This allows resampling the {type_name} each time it's resolved, useful for creating dynamic structures where the same {description} is sampled multiple times independently. Returns: - A Resampled instance wrapping this {type_name}. + A Resample instance wrapping this {type_name}. Example: ```python - # Instead of: neps.Resampled(my_{type_lower}) + # Instead of: neps.Resample(my_{type_lower}) # You can write: my_{type_lower}.resample() ``` """ @@ -235,6 +235,84 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Fidelity: # noqa: ARG002 raise ValueError("For a Fidelity object there is nothing to resolve.") +class IntegerFidelity(Fidelity): + """A convenience class for creating integer-valued fidelity parameters. + + This class provides a simpler interface for defining integer fidelities without + needing to explicitly wrap an Integer domain in a Fidelity. + + Example: + ```python + # Instead of: epochs = neps.Fidelity(neps.Integer(1, 50)) + # You can write: + epochs = neps.IntegerFidelity(lower=1, upper=50) + ``` + """ + + def __init__( + self, + lower: int, + upper: int, + *, + log: bool = False, + ): + """Initialize an IntegerFidelity with lower and upper bounds. + + Args: + lower: The minimum value for the integer fidelity. + upper: The maximum value for the integer fidelity. + log: Whether to sample the integer on a logarithmic scale. + + """ + super().__init__(domain=Integer(lower=lower, upper=upper, log=log)) + + def __str__(self) -> str: + """Get a string representation of the integer fidelity.""" + domain = self.domain + if domain._log: + return f"IntegerFidelity({domain.lower}, {domain.upper}, log)" + return f"IntegerFidelity({domain.lower}, {domain.upper})" + + +class FloatFidelity(Fidelity): + """A convenience class for creating float-valued fidelity parameters. 
+ + This class provides a simpler interface for defining float fidelities without + needing to explicitly wrap a Float domain in a Fidelity. + + Example: + ```python + # Instead of: subset_ratio = neps.Fidelity(neps.Float(0.1, 1.0)) + # You can write: + subset_ratio = neps.FloatFidelity(lower=0.1, upper=1.0) + ``` + """ + + def __init__( + self, + lower: float, + upper: float, + *, + log: bool = False, + ): + """Initialize a FloatFidelity with lower and upper bounds. + + Args: + lower: The minimum value for the float fidelity. + upper: The maximum value for the float fidelity. + log: Whether to sample the float on a logarithmic scale. + + """ + super().__init__(domain=Float(lower=lower, upper=upper, log=log)) + + def __str__(self) -> str: + """Get a string representation of the float fidelity.""" + domain = self.domain + if domain._log: + return f"FloatFidelity({domain.lower}, {domain.upper}, log)" + return f"FloatFidelity({domain.lower}, {domain.upper})" + + class PipelineSpace(Resolvable): """A class representing a pipeline in NePS spaces.""" @@ -331,14 +409,14 @@ def __str__(self) -> str: def add( self, - new_param: Integer | Float | Categorical | Operation | Resampled | Repeated, + new_param: Integer | Float | Categorical | Operation | Resample | Repeated, name: str | None = None, ) -> PipelineSpace: """Add a new parameter to the pipeline. Args: new_param: The parameter to be added, which can be an Integer, Float, - Categorical, Operation, Resampled, Repeated, or PipelineSpace. + Categorical, Operation, Resample, Repeated, or PipelineSpace. name: The name of the parameter to be added. If None, a default name will be generated. 
@@ -356,10 +434,10 @@ def add( return new_space if not isinstance( - new_param, Integer | Float | Categorical | Operation | Resampled | Repeated + new_param, Integer | Float | Categorical | Operation | Resample | Repeated ): raise ValueError( - "Can only add Integer, Float, Categorical, Operation, Resampled," + "Can only add Integer, Float, Categorical, Operation, Resample," f" Repeated or PipelineSpace, got {new_param!r}." ) param_name = name if name else f"param_{len(self.get_attrs()) + 1}" @@ -640,23 +718,23 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Domain[T]: """ return type(self)(**attrs) - def resample(self) -> Resampled: - """Wrap this domain in a Resampled container. + def resample(self) -> Resample: + """Wrap this domain in a Resample container. This allows resampling the domain each time it's resolved, useful for creating dynamic structures where the same parameter definition is sampled multiple times independently. Returns: - A Resampled instance wrapping this domain. + A Resample instance wrapping this domain. Example: ```python - # Instead of: neps.Resampled(neps.Integer(1, 10)) + # Instead of: neps.Resample(neps.Integer(1, 10)) # You can write: neps.Integer(1, 10).resample() ``` """ - return Resampled(self) + return Resample(self) def _calculate_new_domain_bounds( @@ -1412,9 +1490,9 @@ def __str__(self) -> str: return format_value(self) - def resample(self) -> Resampled: # noqa: D102 + def resample(self) -> Resample: # noqa: D102 # Docstring set dynamically below - return Resampled(self) + return Resample(self) def compare_domain_to(self, other: object) -> bool: # noqa: D102 # Docstring set dynamically below @@ -1502,10 +1580,10 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Operation: # TODO: [lum] For tuples, lists and dicts, # should we make the behavior similar to other resolvables, -# in that they will be cached and then we also need to use Resampled for them? 
+# in that they will be cached and then we also need to use Resample for them? -class Resampled(Resolvable): +class Resample(Resolvable): """A class representing a resampling operation in a NePS space. Attributes: @@ -1514,7 +1592,7 @@ class Resampled(Resolvable): """ def __init__(self, source: Resolvable | str): - """Initialize the Resampled object with a source. + """Initialize the Resample object with a source. Args: source: The source of the resampling, can be a resolvable object or a string. @@ -1595,7 +1673,7 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: def compare_domain_to(self, other: object) -> bool: # noqa: D102 # Docstring set dynamically below - if not isinstance(other, Resampled): + if not isinstance(other, Resample): return False return self.source == other.source @@ -1644,9 +1722,9 @@ def content(self) -> Resolvable | Any: """ return self._content - def resample(self) -> Resampled: # noqa: D102 + def resample(self) -> Resample: # noqa: D102 # Docstring set dynamically below - return Resampled(self) + return Resample(self) def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the resolvable as a mapping. @@ -1699,9 +1777,9 @@ def content(self) -> Resolvable | tuple[Any] | str: """ return self._content - def resample(self) -> Resampled: # noqa: D102 + def resample(self) -> Resample: # noqa: D102 # Docstring set dynamically below - return Resampled(self) + return Resample(self) def get_attrs(self) -> Mapping[str, Any]: """Get the attributes of the lazy resolvable as a mapping. 
@@ -1769,6 +1847,6 @@ def from_attrs(self, attrs: Mapping[str, Any]) -> Resolvable: # noqa: ARG002 Operation.compare_domain_to.__doc__ = _COMPARE_DOMAIN_DOCSTRING.format( type_name="operation" ) -Resampled.compare_domain_to.__doc__ = _COMPARE_DOMAIN_DOCSTRING.format( +Resample.compare_domain_to.__doc__ = _COMPARE_DOMAIN_DOCSTRING.format( type_name="resampled" ) diff --git a/neps/space/neps_spaces/string_formatter.py b/neps/space/neps_spaces/string_formatter.py index b327bea7c..0b8b9eb3c 100644 --- a/neps/space/neps_spaces/string_formatter.py +++ b/neps/space/neps_spaces/string_formatter.py @@ -9,7 +9,7 @@ ├── _format_categorical() - Internal handler for Categorical ├── _format_float() - Internal handler for Float ├── _format_integer() - Internal handler for Integer - ├── _format_resampled() - Internal handler for Resampled + ├── _format_resampled() - Internal handler for Resample ├── _format_repeated() - Internal handler for Repeated ├── _format_operation() - Internal handler for Operation ├── _format_sequence() - Internal handler for list/tuple @@ -31,7 +31,7 @@ Integer, Operation, Repeated, - Resampled, + Resample, ) @@ -40,7 +40,7 @@ class FormatterStyle: """Configuration for the formatting style.""" indent_str: str = " " # Three spaces for indentation - max_line_length: int = 80 # Try to keep lines under this length + max_line_length: int = 90 # Try to keep lines under this length compact_threshold: int = 40 # Use compact format if repr is shorter show_empty_args: bool = True # Show () for operations with no args/kwargs @@ -50,7 +50,7 @@ class FormatterStyle: # ============================================================================ -def format_value( # noqa: C901, PLR0911 +def format_value( # noqa: C901, PLR0911, PLR0912 value: Any, indent: int = 0, style: FormatterStyle | None = None, @@ -70,11 +70,12 @@ def format_value( # noqa: C901, PLR0911 """ from neps.space.neps_spaces.parameters import ( Categorical, + Fidelity, Float, Integer, Operation, Repeated, 
- Resampled, + Resample, ) if style is None: @@ -93,7 +94,11 @@ def format_value( # noqa: C901, PLR0911 if isinstance(value, Integer): return _format_integer(value, indent, style) - if isinstance(value, Resampled): + if isinstance(value, Fidelity): + # Use the __str__ method of Fidelity subclasses directly + return str(value) + + if isinstance(value, Resample): return _format_resampled(value, indent, style) if isinstance(value, Repeated): @@ -214,11 +219,11 @@ def _format_integer( def _format_resampled( - resampled: Resampled, + resampled: Resample, indent: int, style: FormatterStyle, ) -> str: - """Internal formatter for Resampled parameters.""" + """Internal formatter for Resample parameters.""" source = resampled._source # Format the source using unified format_value @@ -228,10 +233,10 @@ def _format_resampled( if "\n" in source_str: indent_str = style.indent_str * indent inner_indent_str = style.indent_str * (indent + 1) - return f"Resampled(\n{inner_indent_str}{source_str}\n{indent_str})" + return f"Resample(\n{inner_indent_str}{source_str}\n{indent_str})" # Simple single-line format for basic types - return f"Resampled({source_str})" + return f"Resample({source_str})" def _format_repeated( @@ -391,20 +396,3 @@ def _format_pipeline_space( else: lines.append(f" {k} = {formatted_value}") return "\n".join(lines) - - -# ============================================================================ -# BACKWARD COMPATIBILITY - Legacy function names -# ============================================================================ - - -def operation_to_string( - operation: Operation | Any, - style: FormatterStyle | None = None, -) -> str: - """Convert an Operation to a pretty-formatted string. - - Legacy function for backward compatibility. - New code should use format_value() directly. 
- """ - return format_value(operation, 0, style) diff --git a/neps/utils/__init__.py b/neps/utils/__init__.py index 35122741f..fbc96b0eb 100644 --- a/neps/utils/__init__.py +++ b/neps/utils/__init__.py @@ -1,11 +1,7 @@ -from neps.space.neps_spaces.neps_space import ( - convert_operation_to_callable, - convert_operation_to_string, -) +from neps.space.neps_spaces.neps_space import convert_operation_to_callable from neps.utils.trial_io import load_trials_from_pickle __all__ = [ "convert_operation_to_callable", - "convert_operation_to_string", "load_trials_from_pickle", ] diff --git a/neps_examples/convenience/create_and_import_custom_config.py b/neps_examples/convenience/create_and_import_custom_config.py index 467162f5a..e78407b87 100644 --- a/neps_examples/convenience/create_and_import_custom_config.py +++ b/neps_examples/convenience/create_and_import_custom_config.py @@ -7,7 +7,7 @@ # This example space demonstrates all types of parameters available in NePS. class ExampleSpace(neps.PipelineSpace): - int1 = neps.Fidelity(neps.Integer(1, 10)) + int1 = neps.IntegerFidelity(1, 10) float1 = neps.Float(0.0, 1.0) cat1 = neps.Categorical(["a", "b", "c"]) cat2 = neps.Categorical(["x", "y", float1]) diff --git a/neps_examples/convenience/import_trials.py b/neps_examples/convenience/import_trials.py index f5a335fe2..555011c31 100644 --- a/neps_examples/convenience/import_trials.py +++ b/neps_examples/convenience/import_trials.py @@ -135,7 +135,7 @@ def get_evaluated_trials(optimizer) -> list[tuple[dict[str, Any], UserResultDict def run_import_trials(optimizer): class ExampleSpace(neps.PipelineSpace): float1 = neps.Float(lower=0, upper=1) - float2 = neps.Fidelity(neps.Float(lower=1, upper=10)) + float2 = neps.FloatFidelity(lower=1, upper=10) categorical = neps.Categorical(choices=[0, 1]) integer1 = neps.Integer(lower=0, upper=1) integer2 = neps.Integer(lower=1, upper=1000, log=True) diff --git a/neps_examples/efficiency/multi_fidelity.py 
b/neps_examples/efficiency/multi_fidelity.py index b3fb2781d..b6eeccc0c 100644 --- a/neps_examples/efficiency/multi_fidelity.py +++ b/neps_examples/efficiency/multi_fidelity.py @@ -85,7 +85,7 @@ def evaluate_pipeline( class HPOSpace(neps.PipelineSpace): learning_rate = neps.Float(lower=1e-4, upper=1e0, log=True) - epoch = neps.Fidelity(neps.Integer(lower=1, upper=10)) + epoch = neps.IntegerFidelity(lower=1, upper=10) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/efficiency/multi_fidelity_and_expert_priors.py b/neps_examples/efficiency/multi_fidelity_and_expert_priors.py index 2f582803f..05841fcac 100644 --- a/neps_examples/efficiency/multi_fidelity_and_expert_priors.py +++ b/neps_examples/efficiency/multi_fidelity_and_expert_priors.py @@ -32,7 +32,7 @@ class HPOSpace(neps.PipelineSpace): prior=35, prior_confidence="low", ) - fidelity = neps.Fidelity(neps.Integer(lower=1, upper=10)) + fidelity = neps.IntegerFidelity(lower=1, upper=10) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/efficiency/pytorch_lightning_ddp.py b/neps_examples/efficiency/pytorch_lightning_ddp.py index 99dd117fc..07c3b970a 100644 --- a/neps_examples/efficiency/pytorch_lightning_ddp.py +++ b/neps_examples/efficiency/pytorch_lightning_ddp.py @@ -87,7 +87,7 @@ def evaluate_pipeline(lr=0.1, epoch=20): class HPOSpace(neps.PipelineSpace): lr = neps.Float(lower=0.001, upper=0.1, log=True, prior=0.01) - epoch = neps.Fidelity(neps.Integer(lower=1, upper=3)) + epoch = neps.IntegerFidelity(lower=1, upper=3) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/efficiency/pytorch_lightning_fsdp.py b/neps_examples/efficiency/pytorch_lightning_fsdp.py index 14c7bb481..3f7afa609 100644 --- a/neps_examples/efficiency/pytorch_lightning_fsdp.py +++ b/neps_examples/efficiency/pytorch_lightning_fsdp.py @@ -58,7 +58,7 @@ def evaluate_pipeline(lr=0.1, epoch=20): class HPOSpace(neps.PipelineSpace): lr = neps.Float(lower=0.001, upper=0.1, log=True, prior=0.01) - 
epoch = neps.Fidelity(neps.Integer(lower=1, upper=3)) + epoch = neps.IntegerFidelity(lower=1, upper=3) neps.run( evaluate_pipeline=evaluate_pipeline, diff --git a/neps_examples/efficiency/pytorch_native_fsdp.py b/neps_examples/efficiency/pytorch_native_fsdp.py index 775219ed1..4e441f761 100644 --- a/neps_examples/efficiency/pytorch_native_fsdp.py +++ b/neps_examples/efficiency/pytorch_native_fsdp.py @@ -210,7 +210,7 @@ def evaluate_pipeline(lr=0.1, epoch=20): class HPOSpace(neps.PipelineSpace): lr = neps.Float(lower=0.0001, upper=0.1, log=True, prior=0.01) - epoch = neps.Fidelity(neps.Integer(lower=1, upper=3)) + epoch = neps.IntegerFidelity(lower=1, upper=3) neps.run( evaluate_pipeline=evaluate_pipeline, diff --git a/neps_examples/experimental/ask_and_tell_example.py b/neps_examples/experimental/ask_and_tell_example.py index afe1bd792..e1944985d 100644 --- a/neps_examples/experimental/ask_and_tell_example.py +++ b/neps_examples/experimental/ask_and_tell_example.py @@ -50,6 +50,7 @@ This script serves as a template for implementing custom trial execution workflows with NePS. 
""" + import argparse import time from pathlib import Path @@ -61,6 +62,7 @@ from neps.optimizers.ask_and_tell import AskAndTell + def submit_job(pipeline_directory: Path, script: str) -> int: script_path = pipeline_directory / "submit.sh" print(f"Submitting the script {script_path} (see below): \n\n{script}") @@ -72,6 +74,7 @@ def submit_job(pipeline_directory: Path, script: str) -> int: job_id = int(output.split()[-1]) return job_id + def get_job_script(pipeline_directory, trial_file): script = f"""#!/bin/bash #SBATCH --job-name=mnist_toy @@ -82,6 +85,7 @@ def get_job_script(pipeline_directory, trial_file): """ return script + def train_worker(trial_file): trial_file = Path(trial_file) with open(trial_file) as f: @@ -89,16 +93,18 @@ def train_worker(trial_file): config = trial["config"] # Dummy objective - loss = (config["a"] - 0.5)**2 + ((config["b"] + 2)**2) / 5 + loss = (config["a"] - 0.5) ** 2 + ((config["b"] + 2) ** 2) / 5 out_file = trial_file.parent / f"result_{trial['id']}.json" with open(out_file, "w") as f: json.dump({"loss": loss}, f) + def main(parallel: int, results_dir: Path): class MySpace(neps.PipelineSpace): - a = neps.Fidelity(neps.Integer(1, 13)) + a = neps.IntegerFidelity(1, 13) b = neps.Float(1, 5) + space = MySpace() opt = neps.algorithms.neps_hyperband(space, eta=3) ask_tell = AskAndTell(opt) @@ -128,20 +134,30 @@ class MySpace(neps.PipelineSpace): new_trial = ask_tell.ask() if new_trial: new_file = results_dir / f"trial_{new_trial.id}.json" - json.dump({"id": new_trial.id, "config": new_trial.config}, new_file.open("w")) - new_job_id = submit_job(results_dir, get_job_script(results_dir, new_file)) + json.dump( + {"id": new_trial.id, "config": new_trial.config}, + new_file.open("w"), + ) + new_job_id = submit_job( + results_dir, get_job_script(results_dir, new_file) + ) active[new_job_id] = new_trial time.sleep(5) + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( - "--parallel", type=int, default=9, - 
help="Number of trials to evaluate in parallel initially" + "--parallel", + type=int, + default=9, + help="Number of trials to evaluate in parallel initially", ) parser.add_argument( - "--results-dir", type=Path, default=Path("results/ask_and_tell"), - help="Path to save the results inside" + "--results-dir", + type=Path, + default=Path("results/ask_and_tell"), + help="Path to save the results inside", ) args = parser.parse_args() main(args.parallel, args.results_dir) diff --git a/neps_examples/experimental/freeze_thaw.py b/neps_examples/experimental/freeze_thaw.py index e8391ac1e..fe490c1ce 100644 --- a/neps_examples/experimental/freeze_thaw.py +++ b/neps_examples/experimental/freeze_thaw.py @@ -153,7 +153,7 @@ class ModelSpace(neps.PipelineSpace): num_layers = neps.Integer(1, 5) num_neurons = neps.Integer(64, 128) weight_decay = neps.Float(1e-5, 0.1, log=True) - epochs = neps.Fidelity(neps.Integer(1, 10)) + epochs = neps.IntegerFidelity(1, 10) neps.run( pipeline_space=ModelSpace(), diff --git a/tests/test_neps_space/test_neps_integration.py b/tests/test_neps_space/test_neps_integration.py index f2bc8334a..8f18ace39 100644 --- a/tests/test_neps_space/test_neps_integration.py +++ b/tests/test_neps_space/test_neps_integration.py @@ -6,7 +6,6 @@ import pytest import neps -import neps.optimizers from neps.optimizers import algorithms from neps.space.neps_spaces.neps_space import ( check_neps_space_compatibility, @@ -16,9 +15,9 @@ from neps.space.neps_spaces.parameters import ( Categorical, ConfidenceLevel, - Fidelity, Float, Integer, + IntegerFidelity, Operation, PipelineSpace, ) @@ -99,11 +98,9 @@ class DemoHyperparameterWithFidelitySpace(PipelineSpace): prior=0, prior_confidence=ConfidenceLevel.MEDIUM, ) - integer2 = Fidelity( - Integer( - lower=1, - upper=1000, - ), + integer2 = IntegerFidelity( + lower=1, + upper=1000, ) @@ -491,7 +488,7 @@ def test_algorithm_compatibility(): ], f"Algorithm {algo.__name__} should be classic or both compatible" -# Test with complex 
PipelineSpace containing Operations and Resampled +# Test with complex PipelineSpace containing Operations and Resample def test_complex_neps_space_features(): """Test complex NePS space features that cannot be converted to classic.""" diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py index 7881bd1e7..77266e171 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_cost.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_cost.py @@ -9,9 +9,9 @@ from neps import algorithms from neps.space.neps_spaces.parameters import ( ConfidenceLevel, - Fidelity, Float, Integer, + IntegerFidelity, PipelineSpace, ) @@ -53,11 +53,9 @@ class DemoHyperparameterWithFidelitySpace(PipelineSpace): prior=35, prior_confidence=ConfidenceLevel.LOW, ) - fidelity = Fidelity( - domain=Integer( - lower=1, - upper=100, - ), + fidelity = IntegerFidelity( + lower=1, + upper=100, ) diff --git a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py index 586675bd6..828c82e37 100644 --- a/tests/test_neps_space/test_neps_integration_priorband__max_evals.py +++ b/tests/test_neps_space/test_neps_integration_priorband__max_evals.py @@ -9,9 +9,9 @@ from neps.optimizers import algorithms from neps.space.neps_spaces.parameters import ( ConfidenceLevel, - Fidelity, Float, Integer, + IntegerFidelity, PipelineSpace, ) @@ -40,11 +40,9 @@ class DemoHyperparameterWithFidelitySpace(PipelineSpace): prior=35, prior_confidence=ConfidenceLevel.LOW, ) - fidelity = Fidelity( - domain=Integer( - lower=1, - upper=100, - ), + fidelity = IntegerFidelity( + lower=1, + upper=100, ) diff --git a/tests/test_neps_space/test_pipeline_space_methods.py b/tests/test_neps_space/test_pipeline_space_methods.py index 98155f04c..af90b2b52 100644 --- a/tests/test_neps_space/test_pipeline_space_methods.py +++ 
b/tests/test_neps_space/test_pipeline_space_methods.py @@ -7,12 +7,12 @@ from neps.space.neps_spaces.parameters import ( Categorical, ConfidenceLevel, - Fidelity, Float, Integer, + IntegerFidelity, Operation, PipelineSpace, - Resampled, + Resample, ) @@ -76,11 +76,11 @@ def test_add_method_different_types(): assert "new_op" in space.get_attrs() assert isinstance(space.get_attrs()["new_op"], Operation) - # Add Resampled + # Add Resample resampled = space.get_attrs()["x"].resample() space = space.add(resampled, "new_resampled") assert "new_resampled" in space.get_attrs() - assert isinstance(space.get_attrs()["new_resampled"], Resampled) + assert isinstance(space.get_attrs()["new_resampled"], Resample) def test_add_method_with_default_name(): @@ -351,7 +351,7 @@ def test_fidelity_operations(): class FidelitySpace(PipelineSpace): x = Float(lower=0.0, upper=1.0) - epochs = Fidelity(Integer(lower=1, upper=100)) + epochs = IntegerFidelity(lower=1, upper=100) space = FidelitySpace() diff --git a/tests/test_neps_space/test_search_space__fidelity.py b/tests/test_neps_space/test_search_space__fidelity.py index ce19f49a7..922999785 100644 --- a/tests/test_neps_space/test_search_space__fidelity.py +++ b/tests/test_neps_space/test_search_space__fidelity.py @@ -11,6 +11,7 @@ Fidelity, Float, Integer, + IntegerFidelity, PipelineSpace, ) @@ -23,11 +24,9 @@ class DemoHyperparametersWithFidelitySpace(PipelineSpace): prior=0.1, prior_confidence=ConfidenceLevel.MEDIUM, ) - fidelity_integer1 = Fidelity( - domain=Integer( - lower=1, - upper=1000, - ), + fidelity_integer1 = IntegerFidelity( + lower=1, + upper=1000, ) diff --git a/tests/test_neps_space/test_search_space__grammar_like.py b/tests/test_neps_space/test_search_space__grammar_like.py index 6b0f23cbd..e4788bc01 100644 --- a/tests/test_neps_space/test_search_space__grammar_like.py +++ b/tests/test_neps_space/test_search_space__grammar_like.py @@ -8,7 +8,7 @@ Categorical, Operation, PipelineSpace, - Resampled, + Resample, ) @@ 
-28,7 +28,7 @@ class GrammarLike(PipelineSpace): operator="Sequential", args=( _O.resample(), - Resampled("S"), + Resample("S"), _reluconvbn, ), ) @@ -36,12 +36,12 @@ class GrammarLike(PipelineSpace): operator="Sequential", args=( _O.resample(), - Resampled("S"), + Resample("S"), ), ) _C3 = Operation( operator="Sequential", - args=(Resampled("S"),), + args=(Resample("S"),), ) _C = Categorical( choices=( @@ -62,12 +62,12 @@ class GrammarLike(PipelineSpace): ) _S2 = Operation( operator="Sequential", - args=(Resampled("S"),), + args=(Resample("S"),), ) _S3 = Operation( operator="Sequential", args=( - Resampled("S"), + Resample("S"), _C.resample(), ), ) @@ -82,8 +82,8 @@ class GrammarLike(PipelineSpace): _S5 = Operation( operator="Sequential", args=( - Resampled("S"), - Resampled("S"), + Resample("S"), + Resample("S"), _O.resample(), _O.resample(), _O.resample(), @@ -117,14 +117,14 @@ class GrammarLikeAlt(PipelineSpace): (_O.resample(),), ( _O.resample(), - Resampled("S"), + Resample("S"), _reluconvbn, ), ( _O.resample(), - Resampled("S"), + Resample("S"), ), - (Resampled("S"),), + (Resample("S"),), ), ) _C = Operation( @@ -136,9 +136,9 @@ class GrammarLikeAlt(PipelineSpace): choices=( (_C.resample(),), (_reluconvbn,), - (Resampled("S"),), + (Resample("S"),), ( - Resampled("S"), + Resample("S"), _C.resample(), ), ( @@ -147,8 +147,8 @@ class GrammarLikeAlt(PipelineSpace): _O.resample(), ), ( - Resampled("S"), - Resampled("S"), + Resample("S"), + Resample("S"), _O.resample(), _O.resample(), _O.resample(), @@ -174,10 +174,8 @@ def test_resolve(): pytest.xfail("XFAIL due to too much recursion.") s = resolved_pipeline.S - s_config_string = neps_space.convert_operation_to_string(s) + s_config_string = string_formatter.format_value(s) assert s_config_string - pretty_config = string_formatter.format_value(s) - assert pretty_config @pytest.mark.repeat(500) @@ -190,10 +188,8 @@ def test_resolve_alt(): pytest.xfail("XFAIL due to too much recursion.") s = resolved_pipeline.S - 
s_config_string = neps_space.convert_operation_to_string(s) + s_config_string = string_formatter.format_value(s) assert s_config_string - pretty_config = string_formatter.format_value(s) - assert pretty_config def test_resolve_context(): @@ -311,7 +307,7 @@ def test_resolve_context(): assert sampled_values == samplings_to_make s = resolved_pipeline.S - s_config_string = neps_space.convert_operation_to_string(s) + s_config_string = string_formatter.format_value(s) assert s_config_string # Verify the config contains expected operation names (format may be compact or multiline) assert "Sequential" in s_config_string @@ -499,7 +495,7 @@ def test_resolve_context_alt(): assert sampled_values == samplings_to_make s = resolved_pipeline.S - s_config_string = neps_space.convert_operation_to_string(s) + s_config_string = string_formatter.format_value(s) assert s_config_string # Verify the config contains expected operation names (format may be compact or multiline) assert "Sequential" in s_config_string diff --git a/tests/test_neps_space/test_search_space__hnas_like.py b/tests/test_neps_space/test_search_space__hnas_like.py index 17169d315..c4d867fb1 100644 --- a/tests/test_neps_space/test_search_space__hnas_like.py +++ b/tests/test_neps_space/test_search_space__hnas_like.py @@ -23,7 +23,7 @@ class HNASLikePipeline(PipelineSpace): # ------------------------------------------------------ # Adding `PReLU` with a float hyperparameter `init` # Note that the sampled `_prelu_init_value` will be shared across all `_PRELU` uses, - # since no `Resampled` was requested for it + # since no `Resample` was requested for it _prelu_init_value = Float(lower=0.1, upper=0.9) _PRELU = Operation( operator="ACT prelu", @@ -216,16 +216,12 @@ def test_hnas_like_string(): resolved_pipeline, _ = neps_space.resolve(pipeline) arch = resolved_pipeline.ARCH - arch_config_string = neps_space.convert_operation_to_string(arch) + arch_config_string = string_formatter.format_value(arch) assert 
arch_config_string - pretty_config = string_formatter.format_value(arch) - assert pretty_config cl = resolved_pipeline.CL - cl_config_string = neps_space.convert_operation_to_string(cl) + cl_config_string = string_formatter.format_value(cl) assert cl_config_string - pretty_config = string_formatter.format_value(cl) - assert pretty_config def test_hnas_like_context(): @@ -298,7 +294,7 @@ def test_hnas_like_context(): assert sampled_values == samplings_to_make cl = resolved_pipeline.CL - cl_config_string = neps_space.convert_operation_to_string(cl) + cl_config_string = string_formatter.format_value(cl) assert cl_config_string # The new formatter outputs operations in full rather than using sharing references # Check for essential elements instead of exact format @@ -312,7 +308,7 @@ def test_hnas_like_context(): assert "avg_pool" in cl_config_string arch = resolved_pipeline.ARCH - arch_config_string = neps_space.convert_operation_to_string(arch) + arch_config_string = string_formatter.format_value(arch) assert arch_config_string # Check that arch contains CL-related operations (nested structure) assert "Cell(" in arch_config_string diff --git a/tests/test_neps_space/test_search_space__nos_like.py b/tests/test_neps_space/test_search_space__nos_like.py index 55bde46aa..f4ffc28d3 100644 --- a/tests/test_neps_space/test_search_space__nos_like.py +++ b/tests/test_neps_space/test_search_space__nos_like.py @@ -8,7 +8,7 @@ Integer, Operation, PipelineSpace, - Resampled, + Resample, ) @@ -100,7 +100,7 @@ class NosBench(PipelineSpace): (_F.resample(),), ( _F.resample(), - Resampled("_L"), + Resample("_L"), ), ), ) @@ -127,7 +127,5 @@ def test_resolve(): raise p = resolved_pipeline.P - p_config_string = neps_space.convert_operation_to_string(p) + p_config_string = string_formatter.format_value(p) assert p_config_string - pretty_config = string_formatter.format_value(p) - assert pretty_config diff --git a/tests/test_neps_space/test_search_space__recursion.py 
b/tests/test_neps_space/test_search_space__recursion.py index c223022aa..b65c36b70 100644 --- a/tests/test_neps_space/test_search_space__recursion.py +++ b/tests/test_neps_space/test_search_space__recursion.py @@ -8,7 +8,7 @@ Float, Operation, PipelineSpace, - Resampled, + Resample, ) @@ -54,7 +54,7 @@ class DemoRecursiveOperationSpace(PipelineSpace): # If we want the `factor` values to be different, # we just request a resample for them _inner_function = Categorical( - choices=(_sum, Resampled("model")), + choices=(_sum, Resample("model")), ) model = Operation( operator=Model, diff --git a/tests/test_neps_space/test_search_space__reuse_arch_elements.py b/tests/test_neps_space/test_search_space__reuse_arch_elements.py index 443906096..6877e9a0d 100644 --- a/tests/test_neps_space/test_search_space__reuse_arch_elements.py +++ b/tests/test_neps_space/test_search_space__reuse_arch_elements.py @@ -3,7 +3,7 @@ import pytest import neps.space.neps_spaces.sampling -from neps.space.neps_spaces import neps_space +from neps.space.neps_spaces import neps_space, string_formatter from neps.space.neps_spaces.parameters import ( Categorical, ConfidenceLevel, @@ -160,7 +160,7 @@ def test_nested_simple_string(): resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) act = resolved_pipeline.act - act_config_string = neps_space.convert_operation_to_string(act) + act_config_string = string_formatter.format_value(act) assert act_config_string # Check for one of the possible operations @@ -206,7 +206,7 @@ def test_nested_complex_string(): resolved_pipeline, _ = neps_space.resolve(pipeline) act = resolved_pipeline.act - act_config_string = neps_space.convert_operation_to_string(act) + act_config_string = string_formatter.format_value(act) assert act_config_string # Format is now expanded, check for content @@ -243,7 +243,7 @@ def test_fixed_pipeline_string(): resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) act = resolved_pipeline.act - 
act_config_string = neps_space.convert_operation_to_string(act) + act_config_string = string_formatter.format_value(act) assert act_config_string # Check content rather than exact format (now always expanded) assert "prelu" in act_config_string @@ -289,7 +289,7 @@ def test_simple_reuse_string(): resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) conv_block = resolved_pipeline.conv_block - conv_block_config_string = neps_space.convert_operation_to_string(conv_block) + conv_block_config_string = string_formatter.format_value(conv_block) assert conv_block_config_string # Should contain sequential3 and three conv operations @@ -361,7 +361,7 @@ def test_shared_complex_string(): resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) cell = resolved_pipeline.cell - cell_config_string = neps_space.convert_operation_to_string(cell) + cell_config_string = string_formatter.format_value(cell) # Verify essential elements are present assert cell_config_string @@ -384,7 +384,7 @@ def test_shared_complex_context(): # This one should only do the reuse tests # todo: add a more complex test, where we have hidden Categorical choices. - # E.g. add Resampled along the way + # E.g. add Resample along the way samplings_to_make = { "Resolvable.op1::categorical__3": 2, @@ -427,12 +427,8 @@ def test_shared_complex_context(): # The new formatter outputs operations in full rather than using references. # Check that both resolutions produce the same format and contain expected operations. 
- config_str_first = neps_space.convert_operation_to_string( - resolved_pipeline_first.cell - ) - config_str_second = neps_space.convert_operation_to_string( - resolved_pipeline_second.cell - ) + config_str_first = string_formatter.format_value(resolved_pipeline_first.cell) + config_str_second = string_formatter.format_value(resolved_pipeline_second.cell) # Both resolutions with same samplings should produce identical output assert config_str_first == config_str_second diff --git a/tests/test_neps_space/test_space_conversion_and_compatibility.py b/tests/test_neps_space/test_space_conversion_and_compatibility.py index dceb96b55..1309690d8 100644 --- a/tests/test_neps_space/test_space_conversion_and_compatibility.py +++ b/tests/test_neps_space/test_space_conversion_and_compatibility.py @@ -17,6 +17,7 @@ Fidelity, Float, Integer, + IntegerFidelity, Operation, PipelineSpace, ) @@ -37,7 +38,7 @@ class SimpleHPOWithFidelitySpace(PipelineSpace): x = Float(lower=0.0, upper=1.0, prior=0.5, prior_confidence=ConfidenceLevel.MEDIUM) y = Integer(lower=1, upper=10, prior=5, prior_confidence=ConfidenceLevel.HIGH) - epochs = Fidelity(Integer(lower=1, upper=100)) + epochs = IntegerFidelity(lower=1, upper=100) class ComplexNepsSpace(PipelineSpace): @@ -339,7 +340,7 @@ def test_neps_hyperband_rejects_classic_space(): # Create a proper NePS space that should work class TestSpace(PipelineSpace): x = Float(0.0, 1.0) - epochs = Fidelity(Integer(1, 100)) + epochs = IntegerFidelity(1, 100) space = TestSpace() diff --git a/tests/test_neps_space/test_string_formatter.py b/tests/test_neps_space/test_string_formatter.py index 30733d3d6..e0d9f9f69 100644 --- a/tests/test_neps_space/test_string_formatter.py +++ b/tests/test_neps_space/test_string_formatter.py @@ -6,14 +6,14 @@ from neps.space.neps_spaces.parameters import Operation from neps.space.neps_spaces.string_formatter import ( FormatterStyle, - operation_to_string, + format_value, ) def test_simple_operation_no_args(): """Test formatting 
an operation with no arguments - default shows ().""" op = Operation(operator="ReLU") - result = operation_to_string(op) + result = format_value(op) assert result == "ReLU()" @@ -21,14 +21,14 @@ def test_simple_operation_no_args_with_parens(): """Test formatting with show_empty_args=False to hide ().""" op = Operation(operator="ReLU") style = FormatterStyle(show_empty_args=False) - result = operation_to_string(op, style) + result = format_value(op, 0, style) assert result == "ReLU" def test_operation_with_args_only(): """Test formatting an operation with positional args only - always expanded.""" op = Operation(operator="Add", args=(1, 2, 3)) - result = operation_to_string(op) + result = format_value(op) expected = """Add( 1, 2, @@ -40,7 +40,7 @@ def test_operation_with_args_only(): def test_operation_with_kwargs_only(): """Test formatting an operation with keyword args only - always expanded.""" op = Operation(operator="Conv2d", kwargs={"in_channels": 3, "out_channels": 64}) - result = operation_to_string(op) + result = format_value(op) expected = """Conv2d( in_channels=3, out_channels=64, @@ -55,7 +55,7 @@ def test_operation_with_args_and_kwargs(): args=(128,), kwargs={"activation": "relu", "dropout": 0.5}, ) - result = operation_to_string(op) + result = format_value(op) expected = """LinearLayer( 128, activation=relu, @@ -68,7 +68,7 @@ def test_nested_operations(): """Test formatting nested operations.""" inner = Operation(operator="ReLU") outer = Operation(operator="Sequential", args=(inner,)) - result = operation_to_string(outer) + result = format_value(outer) expected = """Sequential( ReLU(), )""" @@ -86,7 +86,7 @@ def test_deeply_nested_operations(): sequential = Operation(operator="Sequential", args=(conv, relu, pool)) - result = operation_to_string(sequential) + result = format_value(sequential) expected = """Sequential( Conv2d( in_channels=3, @@ -104,7 +104,7 @@ def test_deeply_nested_operations(): def test_list_as_arg(): """Test formatting with a list as 
an argument.""" op = Operation(operator="Conv2d", kwargs={"kernel_size": [3, 3]}) - result = operation_to_string(op) + result = format_value(op) expected = """Conv2d( kernel_size=[3, 3], )""" @@ -115,7 +115,7 @@ def test_long_list_as_arg(): """Test formatting with a longer list that spans multiple lines.""" long_list = list(range(20)) op = Operation(operator="SomeOp", kwargs={"values": long_list}) - result = operation_to_string(op) + result = format_value(op) # Should have the list expanded assert "values=[" in result @@ -126,7 +126,7 @@ def test_long_list_as_arg(): def test_tuple_as_arg(): """Test formatting with a tuple as an argument.""" op = Operation(operator="Shape", args=((64, 64, 3),)) - result = operation_to_string(op) + result = format_value(op) expected = """Shape( (64, 64, 3), )""" @@ -139,7 +139,7 @@ def test_dict_as_kwarg(): operator="ConfigOp", kwargs={"config": {"learning_rate": 0.001, "batch_size": 32}}, ) - result = operation_to_string(op) + result = format_value(op) # Dict gets expanded due to length expected = """ConfigOp( config={ @@ -157,7 +157,7 @@ def test_operations_in_list(): container = Operation(operator="ModuleList", args=([op1, op2],)) - result = operation_to_string(container) + result = format_value(container) expected = """ModuleList( [ Conv2d( @@ -178,7 +178,7 @@ def test_operations_in_list_as_kwarg(): container = Operation(operator="Container", kwargs={"layers": [op1, op2]}) - result = operation_to_string(container) + result = format_value(container) expected = """Container( layers=[ ReLU(), @@ -195,7 +195,7 @@ def test_mixed_types_in_list(): container = Operation(operator="MixedContainer", args=(mixed_list,)) - result = operation_to_string(container) + result = format_value(container) # Check that all elements are present assert "1," in result @@ -215,7 +215,7 @@ def test_string_values_with_quotes(): "double_quotes": 'say "hello"', }, ) - result = operation_to_string(op) + result = format_value(op) # Check strings are properly 
represented assert "text='hello world'" in result or 'text="hello world"' in result @@ -239,7 +239,7 @@ def test_complex_nested_structure(): kwargs={"dropout": 0.5, "config": {"layers": [3, 64, 128]}}, ) - result = operation_to_string(seq) + result = format_value(seq) # Verify structure assert "Sequential(" in result @@ -255,13 +255,13 @@ def test_complex_nested_structure(): def test_non_operation_value(): """Test formatting a non-Operation value.""" # Should work with any value - result1 = operation_to_string(42) + result1 = format_value(42) assert result1 == "42" - result2 = operation_to_string("hello") + result2 = format_value("hello") assert result2 == "hello" # Identifiers don't get quotes - result3 = operation_to_string([1, 2, 3]) + result3 = format_value([1, 2, 3]) assert result3 == "[1, 2, 3]" @@ -270,7 +270,7 @@ def test_custom_indent(): op = Operation(operator="Conv2d", kwargs={"channels": 64}) style = FormatterStyle(indent_str=" ") # 4 spaces - result = operation_to_string(op, style) + result = format_value(op, 0, style) expected = """Conv2d( channels=64, )""" @@ -280,7 +280,7 @@ def test_custom_indent(): def test_empty_list(): """Test formatting with empty list.""" op = Operation(operator="Op", kwargs={"items": []}) - result = operation_to_string(op) + result = format_value(op) expected = """Op( items=[], )""" @@ -290,7 +290,7 @@ def test_empty_list(): def test_empty_tuple(): """Test formatting with empty tuple.""" op = Operation(operator="Op", args=((),)) - result = operation_to_string(op) + result = format_value(op) expected = """Op( (), )""" @@ -300,7 +300,7 @@ def test_empty_tuple(): def test_empty_dict(): """Test formatting with empty dict.""" op = Operation(operator="Op", kwargs={"config": {}}) - result = operation_to_string(op) + result = format_value(op) expected = """Op( config={}, )""" @@ -310,7 +310,7 @@ def test_empty_dict(): def test_boolean_values(): """Test formatting with boolean values - always expanded.""" op = Operation(operator="Op", 
kwargs={"enabled": True, "debug": False, "count": 0}) - result = operation_to_string(op) + result = format_value(op) expected = """Op( enabled=True, debug=False, @@ -322,7 +322,7 @@ def test_boolean_values(): def test_none_value(): """Test formatting with None value - always expanded.""" op = Operation(operator="Op", kwargs={"default": None}) - result = operation_to_string(op) + result = format_value(op) expected = """Op( default=None, )""" @@ -356,7 +356,7 @@ def test_real_world_example(): args=([conv1, relu1, pool1, conv2, relu2, pool2, flatten, fc],), ) - result = operation_to_string(model) + result = format_value(model) # Verify key elements are present assert "Sequential(" in result @@ -391,7 +391,7 @@ class TestSpace(neps.PipelineSpace): assert isinstance(resolved.choice, Operation) # Should format properly - check for essential content - result = operation_to_string(resolved.choice) + result = format_value(resolved.choice) # Either Conv2d with both params, or ReLU is_conv = ( "Conv2d" in result and "in_channels=3" in result and "kernel_size=3" in result @@ -413,7 +413,7 @@ class TestSpace(neps.PipelineSpace): assert isinstance(resolved.choice, str) # Should format as a simple string (identifiers don't get quotes) - result = operation_to_string(resolved.choice) + result = format_value(resolved.choice) assert result in ["adam", "sgd", "rmsprop"] @@ -433,11 +433,11 @@ class TestSpace(neps.PipelineSpace): resolved, _ = neps.space.neps_spaces.neps_space.resolve(space) # Should format appropriately based on what was chosen - result = operation_to_string(resolved.choice) + result = format_value(resolved.choice) # Check it's one of the expected formats (identifiers don't get quotes) possible_results = [ - "Linear(\n in_features=10,\n)", # Expanded format + "Linear(\n in_features=10,\n)", # Expanded format (3-space indent) "Linear(in_features=10)", # Compact format (simple operation) "simple_string", # Identifiers don't get quotes "42", @@ -458,7 +458,7 @@ class 
TestSpace(neps.PipelineSpace): assert isinstance(resolved.lr, float) # Should format as a simple number - result = operation_to_string(resolved.lr) + result = format_value(resolved.lr) assert result == repr(resolved.lr) @@ -475,7 +475,7 @@ class TestSpace(neps.PipelineSpace): assert isinstance(resolved.batch_size, int) # Should format as a simple number - result = operation_to_string(resolved.batch_size) + result = format_value(resolved.batch_size) assert result == repr(resolved.batch_size) diff --git a/tests/test_neps_space/utils.py b/tests/test_neps_space/utils.py deleted file mode 100644 index bf7420c3e..000000000 --- a/tests/test_neps_space/utils.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import annotations - -from collections.abc import Callable - -from neps.space.neps_spaces import neps_space - - -def generate_possible_config_strings( - pipeline: neps_space.PipelineSpace, - resolved_pipeline_attr_getter: Callable[ - [neps_space.PipelineSpace], - neps_space.Operation, - ], - num_resolutions: int = 50_000, -): - result = set() - - for _ in range(num_resolutions): - resolved_pipeline, _resolution_context = neps_space.resolve(pipeline) - attr = resolved_pipeline_attr_getter(resolved_pipeline) - config_string = neps_space.convert_operation_to_string(attr) - result.add(config_string) - - return result diff --git a/tests/test_runtime/test_trajectory_and_metrics.py b/tests/test_runtime/test_trajectory_and_metrics.py index b82a1ca81..05ee06312 100644 --- a/tests/test_runtime/test_trajectory_and_metrics.py +++ b/tests/test_runtime/test_trajectory_and_metrics.py @@ -14,9 +14,9 @@ from neps.optimizers import algorithms from neps.runtime import DefaultWorker from neps.space.neps_spaces.parameters import ( - Fidelity, Float, Integer, + IntegerFidelity, PipelineSpace, ) from neps.state.neps_state import NePSState @@ -46,7 +46,7 @@ class SpaceWithFidelity(PipelineSpace): x = Float(lower=0.0, upper=1.0) y = Integer(lower=1, upper=10) - epochs = 
Fidelity(Integer(lower=1, upper=50)) + epochs = IntegerFidelity(lower=1, upper=50) def simple_evaluation(x: float, y: int) -> dict: diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index 49462fd00..2923f4164 100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -23,9 +23,9 @@ from neps.space import SearchSpace from neps.space.neps_spaces.parameters import ( Categorical, - Fidelity, Float, Integer, + IntegerFidelity, PipelineSpace, ) from neps.state import BudgetInfo, NePSState, OptimizationState, SeedSnapshot @@ -49,7 +49,7 @@ class SpaceFid(PipelineSpace): b = Categorical(("a", "b", "c")) c = "a" d = Integer(0, 10) - e = Fidelity(Integer(1, 10)) + e = IntegerFidelity(1, 10) return SpaceFid() @@ -72,7 +72,7 @@ class SpaceFidPrior(PipelineSpace): b = Categorical(("a", "b", "c"), prior=0, prior_confidence="medium") c = "a" d = Integer(0, 10, prior=5, prior_confidence="medium") - e = Fidelity(Integer(1, 10)) + e = IntegerFidelity(1, 10) return SpaceFidPrior() From 07e500be48d0df9518b0c88f2b3860bf57279f56 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 30 Nov 2025 14:31:54 +0100 Subject: [PATCH 137/156] feat: Add sampling method to PipelineSpace for random configuration generation --- neps/space/neps_spaces/parameters.py | 42 ++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 8b522d5a2..331061918 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -407,6 +407,48 @@ def __str__(self) -> str: # Delegate to the unified formatter return format_value(self) + def sample( + self, fidelity_values: dict[str, Any] | None = None + ) -> tuple[dict[str, Any], dict[str, Any]]: + """Sample a random configuration from the pipeline. + + Returns: + A tuple containing two dictionaries: + - The first dictionary is the neps-compatible config. 
+ - The second dictionary contains the sampled values. + """ + from neps.space.neps_spaces.neps_space import ( + NepsCompatConverter, + convert_operation_to_callable, + resolve, + ) + from neps.space.neps_spaces.sampling import RandomSampler + from neps.space.neps_spaces.string_formatter import format_value + + if fidelity_values is None: + fidelity_values = {} + for name, value in self.fidelity_attrs.items(): + if name in fidelity_values: + continue + fidelity_values[name] = value.domain.sample() + + sampler = RandomSampler({}) + resolved_pipeline, resolution_context = resolve( + pipeline=self, + domain_sampler=sampler, + environment_values=fidelity_values, + ) + pipeline_dict = dict(**resolved_pipeline.get_attrs()) + + for name, value in pipeline_dict.items(): + if isinstance(value, Operation): # type: ignore[unreachable] + if isinstance(value.operator, str): # type: ignore[unreachable] + pipeline_dict[name] = format_value(value) + else: + pipeline_dict[name] = convert_operation_to_callable(value) + + return dict(NepsCompatConverter.to_neps_config(resolution_context)), pipeline_dict + def add( self, new_param: Integer | Float | Categorical | Operation | Resample | Repeated, From c1d511af8a49e2361ff4e929a8ca5f6320e36c5e Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 30 Nov 2025 16:35:15 +0100 Subject: [PATCH 138/156] feat: Implement pipeline space reconstruction with class identity and add test for serialization --- neps/space/neps_spaces/parameters.py | 45 ++++++++++++++++++- neps/state/neps_state.py | 16 +++++++ tests/test_state/test_filebased_neps_state.py | 31 +++++++++++++ 3 files changed, 90 insertions(+), 2 deletions(-) diff --git a/neps/space/neps_spaces/parameters.py b/neps/space/neps_spaces/parameters.py index 331061918..16168d300 100644 --- a/neps/space/neps_spaces/parameters.py +++ b/neps/space/neps_spaces/parameters.py @@ -8,6 +8,8 @@ import abc import enum +import importlib +import logging import math import random import warnings @@ -95,6 +97,42 
@@ def _reconstruct_pipeline_space(attrs: Mapping[str, Any]) -> PipelineSpace: return space +def _reconstruct_pipeline_space_with_class( + module_name: str, qualname: str, attrs: Mapping[str, Any] +) -> PipelineSpace: + """Reconstruct a PipelineSpace instance preserving the original class. + + This tries to import the class by module and qualname and create an instance + without calling its __init__ (to avoid side effects). If anything goes wrong + we fall back to the legacy reconstruction which returns a plain + `PipelineSpace` instance with the saved attributes. + """ + logger = logging.getLogger(__name__) + try: + module_obj = importlib.import_module(module_name) + cls_obj: Any = module_obj + for part in qualname.split("."): + cls_obj = getattr(cls_obj, part) + + if not isinstance(cls_obj, type): + raise TypeError(f"Resolved qualname is not a class: {qualname}") + + # Create instance without running __init__ so we don't require constructor args + instance: PipelineSpace = object.__new__(cls_obj) + for name, value in attrs.items(): + setattr(instance, name, value) + return instance + except (ImportError, AttributeError, TypeError, OSError) as e: + # Best-effort: restore to a plain PipelineSpace with attributes + logger.debug( + "Could not reconstruct PipelineSpace with class %s.%s: %s", + module_name, + qualname, + e, + ) + return _reconstruct_pipeline_space(attrs) + + def _parameters_are_equivalent(param1: Any, param2: Any) -> bool: """Check if two parameters are equivalent using their is_equivalent_to method. @@ -326,9 +364,12 @@ def __reduce__(self) -> tuple: Returns: A tuple (callable, args) for reconstructing the object. """ - # Store the attributes instead of the class definition + # Store the attributes and the original class identity so we can + # reconstruct an instance of the original class on unpickle. 
attrs = dict(self.get_attrs()) - return (_reconstruct_pipeline_space, (attrs,)) + module_name = self.__class__.__module__ + qualname = self.__class__.__qualname__ + return (_reconstruct_pipeline_space_with_class, (module_name, qualname, attrs)) @property def fidelity_attrs(self) -> Mapping[str, Fidelity]: diff --git a/neps/state/neps_state.py b/neps/state/neps_state.py index 63806540b..6c11e3309 100644 --- a/neps/state/neps_state.py +++ b/neps/state/neps_state.py @@ -836,6 +836,22 @@ def create_or_load( # noqa: C901, PLR0912, PLR0915 with atomic_write(pipeline_space_path, "wb") as f: pickle.dump(pipeline_space, f, protocol=pickle.HIGHEST_PROTOCOL) + # Reload the pipeline space from disk so that the instance + # returned by `create_or_load` matches the on-disk representation + # (this ensures equality checks that compare pickled bytes + # behave consistently between subsequent loads). + try: + with pipeline_space_path.open("rb") as f: + pipeline_space = pickle.load(f) # noqa: S301 + except (EOFError, pickle.UnpicklingError, OSError) as e: + # If reloading fails for expected reasons (corrupt write, + # incomplete file due to race, or IO error) log a warning + # and fall back to the original in-memory `pipeline_space` + # so we don't break creation. We explicitly catch a + # restricted set of exceptions to avoid swallowing + # unexpected errors. 
+ logger.warning("Reloading pipeline_space after write failed: %s", e) + error_dump = ErrDump([]) return NePSState( diff --git a/tests/test_state/test_filebased_neps_state.py b/tests/test_state/test_filebased_neps_state.py index 978d3db78..8180e19f6 100644 --- a/tests/test_state/test_filebased_neps_state.py +++ b/tests/test_state/test_filebased_neps_state.py @@ -123,6 +123,37 @@ class TestSpace(PipelineSpace): assert neps_state == neps_state2 +def test_pipeline_space_written_and_reloaded(tmp_path: Path) -> None: + class TestSpace(PipelineSpace): + a = Float(0, 1) + + optimizer_info = OptimizerInfo(name="test", info={"a": "b"}) + optimizer_state = OptimizationState( + budget=BudgetInfo(cost_to_spend=10, used_cost_budget=0), + seed_snapshot=SeedSnapshot.new_capture(), + shared_state={}, + ) + + new_path = tmp_path / "neps_state" + neps_state = NePSState.create_or_load( + path=new_path, + optimizer_info=optimizer_info, + optimizer_state=optimizer_state, + pipeline_space=TestSpace(), + ) + + # Load-only should return the same state + neps_state2 = NePSState.create_or_load(path=new_path, load_only=True) + assert neps_state == neps_state2 + + # And their pipeline spaces must serialize to the same bytes + import pickle + + assert pickle.dumps(neps_state._pipeline_space) == pickle.dumps( + neps_state2._pipeline_space + ) + + def test_new_or_load_on_existing_neps_state_with_different_optimizer_info( tmp_path: Path, optimizer_info: OptimizerInfo, From 1e8d4a26a071dbee8a945c3a663d7a0bc3cc548f Mon Sep 17 00:00:00 2001 From: Nastaran Alipour Date: Tue, 2 Dec 2025 15:07:22 +0100 Subject: [PATCH 139/156] filter out invalid trials for metrics --- neps/runtime.py | 15 +++---- neps/state/neps_state.py | 14 +++++++ tests/test_state/test_neps_state.py | 62 +++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+), 10 deletions(-) diff --git a/neps/runtime.py b/neps/runtime.py index bb09f90de..71344040e 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -761,9 +761,9 @@ def 
run(self) -> None: # noqa: C901, PLR0912, PLR0915 if report.objective_to_minimize is not None and report.err is None: with self.state._trial_lock.lock(): - trials = self.state._trial_repo.latest() + evaluated_trials = self.state._trial_repo.get_valid_evaluated_trials() self.load_incumbent_trace( - trials, + evaluated_trials, _trace_lock, improvement_trace_path, best_config_path, @@ -793,7 +793,7 @@ def load_incumbent_trace( configurations. Args: - trials (dict): A dictionary of the trials. + trials (dict): A dictionary of the evaluated trials which have a valid report. _trace_lock (FileLock): A file lock to ensure thread-safe writing. improvement_trace_path (Path): Path to the improvement trace file. best_config_path (Path): Path to the best configuration file. @@ -813,20 +813,15 @@ def load_incumbent_trace( t.metadata.time_sampled if t.metadata.time_sampled else float("inf") ), ) - evaluated_trials: list[Trial] = [ - trial - for trial in sorted_trials - if trial.report is not None and trial.report.objective_to_minimize is not None - ] is_mo = any( isinstance(trial.report.objective_to_minimize, list) # type: ignore[union-attr] - for trial in evaluated_trials + for trial in sorted_trials ) frontier: list[Trial] = [] trajectory_confs: dict[str, dict[str, float | int]] = {} - for evaluated_trial in evaluated_trials: + for evaluated_trial in sorted_trials: single_trial_usage = self._calculate_total_resource_usage( {evaluated_trial.id: evaluated_trial} ) diff --git a/neps/state/neps_state.py b/neps/state/neps_state.py index 6c11e3309..067620e58 100644 --- a/neps/state/neps_state.py +++ b/neps/state/neps_state.py @@ -19,6 +19,8 @@ from pathlib import Path from typing import TYPE_CHECKING, Literal, TypeAlias, TypeVar, overload +import numpy as np + from neps.env import ( GLOBAL_ERR_FILELOCK_POLL, GLOBAL_ERR_FILELOCK_TIMEOUT, @@ -142,6 +144,18 @@ def _read_pkl_and_maybe_consolidate( return trials + def get_valid_evaluated_trials(self) -> dict[str, Trial]: + """Get all 
trials that have a valid evaluation report.""" + trials = self.latest() + return { + trial_id: trial + for trial_id, trial in trials.items() + if trial.report is not None + and trial.report.err is None + and trial.report.objective_to_minimize is not None + and not np.isnan(trial.report.objective_to_minimize) + } + def latest(self, *, create_cache_if_missing: bool = True) -> dict[str, Trial]: """Get the latest trials from the cache.""" if not self.cache_path.exists(): diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index 2923f4164..c2aa2f5f4 100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -29,6 +29,7 @@ PipelineSpace, ) from neps.state import BudgetInfo, NePSState, OptimizationState, SeedSnapshot +from neps.state.trial import Report @case @@ -305,3 +306,64 @@ def test_optimizers_work_roughly( ask_and_tell.tell(trial, [1.0, 2.0]) else: ask_and_tell.tell(trial, 1.0) + + +@fixture +def neps_state(tmp_path: Path) -> NePSState: + class TestSpace(PipelineSpace): + a = Float(0, 1) + + return NePSState.create_or_load( + path=tmp_path / "neps_state", + optimizer_info=OptimizerInfo(name="random_search", info={}), + optimizer_state=OptimizationState( + budget=None, + seed_snapshot=SeedSnapshot.new_capture(), + shared_state=None, + ), + pipeline_space=TestSpace(), + ) + + +def test_get_valid_evaluated_trials( + neps_state: NePSState, +) -> None: + optimizer, _ = load_optimizer(("random_search", {}), neps_state._pipeline_space) + trial1 = neps_state.lock_and_sample_trial(optimizer=optimizer, worker_id="1") + trial2 = neps_state.lock_and_sample_trial(optimizer=optimizer, worker_id="1") + trial3 = neps_state.lock_and_sample_trial(optimizer=optimizer, worker_id="1") + + report1 = Report( + objective_to_minimize=0.5, + err=None, + cost=0, + learning_curve=[0], + extra={}, + tb=None, + reported_as="success", + evaluation_duration=1, + ) + neps_state.lock_and_report_trial_evaluation( + trial=trial1, + 
report=report1, + worker_id="1", + ) + + report2 = Report( + objective_to_minimize=float("nan"), + err=None, + cost=0, + learning_curve=[0], + extra={}, + tb=None, + reported_as="success", + evaluation_duration=1, + ) + neps_state.lock_and_report_trial_evaluation( + trial=trial2, report=report2, worker_id="1" + ) + + valid_trials = neps_state._trial_repo.get_valid_evaluated_trials() + assert len(valid_trials) == 1 + assert trial1.id in valid_trials + assert trial3.id not in valid_trials From 07d8e04b0bed40453bae15c679fb5b4502a6f21d Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 2 Dec 2025 15:27:23 +0100 Subject: [PATCH 140/156] refactor: Update classic-neps-filter function to assume NePS-compatibility for new algorithms --- neps/space/neps_spaces/neps_space.py | 56 ++++++++++++++++------------ 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/neps/space/neps_spaces/neps_space.py b/neps/space/neps_spaces/neps_space.py index d02018641..792e7203f 100644 --- a/neps/space/neps_spaces/neps_space.py +++ b/neps/space/neps_spaces/neps_space.py @@ -1261,14 +1261,16 @@ class NEPSSpace(PipelineSpace): return NEPSSpace() -ONLY_NEPS_ALGORITHMS_NAMES = [ - "neps_random_search", - "neps_priorband", - "complex_random_search", - "neps_hyperband", - "complex_hyperband", - "neps_regularized_evolution", - "regularized_evolution", +ONLY_CLASSIC_ALGORITHMS_NAMES = [ + "asha", + "bayesian_optimization", + "ifbo", + "mo_hyperband", + "primo", + "async_hb", + "successive_halving", + "moasha", + "pibo", ] CLASSIC_AND_NEPS_ALGORITHMS_NAMES = [ "random_search", @@ -1279,15 +1281,18 @@ class NEPSSpace(PipelineSpace): # Lazy initialization to avoid circular imports -def _get_only_neps_algorithms_functions() -> list[Callable]: - """Get the list of NEPS-only algorithm functions lazily.""" +def _get_only_classic_algorithms_functions() -> list[Callable]: + """Get the list of classic-only algorithm functions lazily.""" return [ - algorithms.neps_random_search, - 
algorithms.neps_priorband, - algorithms.complex_random_search, - algorithms.neps_hyperband, - algorithms.neps_grid_search, - algorithms.neps_regularized_evolution, + algorithms.asha, + algorithms.bayesian_optimization, + algorithms.ifbo, + algorithms.mo_hyperband, + algorithms.primo, + algorithms.async_hb, + algorithms.successive_halving, + algorithms.moasha, + algorithms.pibo, ] @@ -1340,22 +1345,25 @@ def check_neps_space_compatibility( while isinstance(inner_optimizer, partial): inner_optimizer = inner_optimizer.func - only_neps_algorithm = ( - optimizer_to_check in _get_only_neps_algorithms_functions() - or (inner_optimizer and inner_optimizer in _get_only_neps_algorithms_functions()) + only_classic_algorithm = ( + optimizer_to_check in _get_only_classic_algorithms_functions() or ( - optimizer_to_check[0] in ONLY_NEPS_ALGORITHMS_NAMES + inner_optimizer + and inner_optimizer in _get_only_classic_algorithms_functions() + ) + or ( + optimizer_to_check[0] in ONLY_CLASSIC_ALGORITHMS_NAMES if isinstance(optimizer_to_check, tuple) else False ) or ( - optimizer_to_check in ONLY_NEPS_ALGORITHMS_NAMES + optimizer_to_check in ONLY_CLASSIC_ALGORITHMS_NAMES if isinstance(optimizer_to_check, str) else False ) ) - if only_neps_algorithm: - return "neps" + if only_classic_algorithm: + return "classic" neps_and_classic_algorithm = ( optimizer_to_check in _get_classic_and_neps_algorithms_functions() or ( @@ -1376,4 +1384,4 @@ def check_neps_space_compatibility( ) if neps_and_classic_algorithm: return "both" - return "classic" + return "neps" From 5d43547d35155e29af7e4f7f67b8946f123dcfa9 Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 2 Dec 2025 15:40:42 +0100 Subject: [PATCH 141/156] feat: Add NePS Regularized Evolution to predefined optimizers and optimizer choice --- neps/optimizers/algorithms.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index fc1998fc0..81c069f00 100644 --- 
a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -431,7 +431,9 @@ def _bracket_optimizer( # noqa: C901, PLR0912, PLR0915 ) -def determine_optimizer_automatically(space: SearchSpace | PipelineSpace) -> str: # noqa: PLR0911 +def determine_optimizer_automatically( # noqa: PLR0911 + space: SearchSpace | PipelineSpace, +) -> str: if isinstance(space, PipelineSpace): has_prior = space.has_priors() if space.fidelity_attrs and has_prior: @@ -1822,6 +1824,7 @@ def neps_regularized_evolution( complex_random_search, neps_priorband, neps_hyperband, + neps_regularized_evolution, ) } @@ -1843,4 +1846,5 @@ def neps_regularized_evolution( "complex_random_search", "neps_priorband", "neps_hyperband", + "neps_regularized_evolution", ] From cca015a8041d67d4158cf3f5d1f11915272c528e Mon Sep 17 00:00:00 2001 From: Meganton Date: Tue, 2 Dec 2025 15:43:35 +0100 Subject: [PATCH 142/156] fix: Improve error messages for incompatible optimizers in PipelineSpace --- neps/api.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/neps/api.py b/neps/api.py index 6d1aefcca..b4f4ee2b7 100644 --- a/neps/api.py +++ b/neps/api.py @@ -433,9 +433,9 @@ def __call__( # algorithm, we raise an error, as the optimizer is not compatible. if isinstance(space, PipelineSpace) and neps_classic_space_compatibility == "classic": raise ValueError( - "The provided optimizer is not compatible with this complex search space. " - "Please use one that is, such as 'random_search', 'hyperband', " - "'priorband', or 'complex_random_search'." + f"The provided optimizer {optimizer} is not compatible with this complex" + " search space. Please use one that is, such as 'random_search'," + " 'hyperband', 'priorband', or 'complex_random_search'." ) # Log the search space after conversion @@ -649,9 +649,9 @@ def import_trials( # noqa: C901 # algorithm, we raise an error, as the optimizer is not compatible. 
if isinstance(space, PipelineSpace) and neps_classic_space_compatibility == "classic": raise ValueError( - "The provided optimizer is not compatible with this complex pipeline space. " - "Please use one that is, such as 'random_search', 'hyperband', " - "'priorband', or 'complex_random_search'." + f"The provided optimizer {optimizer} is not compatible with this complex" + " pipeline space. Please use one that is, such as 'random_search'," + " 'hyperband', 'priorband', or 'complex_random_search'." ) optimizer_ask, optimizer_info = load_optimizer(optimizer, space) From a50dc20e258d06b1f6a23cc33b984954022b7043 Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 3 Dec 2025 15:37:36 +0100 Subject: [PATCH 143/156] feat: Enhance Categorical formatting and add bracket collapsing for better readability --- neps/space/neps_spaces/sampling.py | 31 ++-- neps/space/neps_spaces/string_formatter.py | 165 ++++++++++++++++----- 2 files changed, 143 insertions(+), 53 deletions(-) diff --git a/neps/space/neps_spaces/sampling.py b/neps/space/neps_spaces/sampling.py index cc6b988f0..36d228dc1 100644 --- a/neps/space/neps_spaces/sampling.py +++ b/neps/space/neps_spaces/sampling.py @@ -149,7 +149,7 @@ def __call__( class IOSampler(DomainSampler): """A sampler that samples by asking the user at each decision.""" - def __call__( # noqa: C901 + def __call__( # noqa: C901, PLR0912 self, *, domain_obj: Domain[T], @@ -172,21 +172,28 @@ def __call__( # noqa: C901 f" {domain_obj.upper}]: ", # type: ignore[attr-defined] ) elif isinstance(domain_obj, Categorical): + from neps.space.neps_spaces.string_formatter import format_value + + # Format choices and check for multi-line content + formatted_choices = [format_value(c, indent=0) for c in domain_obj.choices] # type: ignore[attr-defined, arg-type] + has_multiline = any("\n" in formatted for formatted in formatted_choices) + + # Build choices display + choices_lines = [""] if has_multiline else [] + for n, formatted in enumerate(formatted_choices): 
+ if "\n" in formatted: + choices_lines.append(f"Option {n}:") + choices_lines.append(formatted) + else: + choices_lines.append(f"Option {n}: {formatted}") + if has_multiline and n < len(formatted_choices) - 1: + choices_lines.append("") # Blank line separator between options - def format_choice(choice: Any) -> str: - """Format a choice nicely, especially for callables.""" - if callable(choice) and (name := getattr(choice, "__name__", None)): - return name - return str(choice) - - choices_list = "\n\t".join( - f"{n}: {format_choice(choice)}" - for n, choice in enumerate(domain_obj.choices) # type: ignore[attr-defined, arg-type] - ) + choices_list = "\n".join(choices_lines) max_index = int(domain_obj.range_compatibility_identifier) - 1 # type: ignore[attr-defined] print( f"Please provide an index for '{current_path}'\n" - f"Choices:\n\t{choices_list}\n" + f"Choices:{choices_list}\n" f"Valid range: [0, {max_index}]: " ) diff --git a/neps/space/neps_spaces/string_formatter.py b/neps/space/neps_spaces/string_formatter.py index 0b8b9eb3c..b367a8ad0 100644 --- a/neps/space/neps_spaces/string_formatter.py +++ b/neps/space/neps_spaces/string_formatter.py @@ -128,6 +128,68 @@ def format_value( # noqa: C901, PLR0911, PLR0912 return repr(value) +# ============================================================================ +# HELPER FUNCTIONS +# ============================================================================ + + +def _collapse_closing_brackets(text: str) -> str: + """Collapse consecutive closing brackets onto same line respecting indentation. + + Transforms: + ) + ) + ) + Into: + ) ) ) + + All brackets are placed on the same line using the minimum indentation. 
+ + Args: + text: The formatted text + + Returns: + Text with collapsed closing brackets + """ + lines = text.split("\n") + result = [] + i = 0 + + while i < len(lines): + current_line = lines[i] + stripped = current_line.strip() + + # Check if this line contains only closing brackets + if stripped and all(c in ")]" for c in stripped): + # Collect consecutive bracket lines + bracket_lines = [current_line] + j = i + 1 + while ( + j < len(lines) + and lines[j].strip() + and all(c in ")]" for c in lines[j].strip()) + ): + bracket_lines.append(lines[j]) + j += 1 + + # Collapse if multiple bracket lines + if len(bracket_lines) > 1: + # Find minimum indentation + min_indent = min(len(line) - len(line.lstrip()) for line in bracket_lines) + # Collapse onto single line + combined = " ".join(line.strip() for line in bracket_lines) + result.append(" " * min_indent + combined) + else: + result.append(current_line) + + i = j + else: + result.append(current_line) + i += 1 + + return "\n".join(result) + + # ============================================================================ # INTERNAL FORMATTERS - Type-specific formatting logic # All these call format_value() for nested values to maintain consistency @@ -158,6 +220,7 @@ def _format_categorical( """Internal formatter for Categorical parameters.""" indent_str = style.indent_str * indent inner_indent_str = style.indent_str * (indent + 1) + choice_indent_str = style.indent_str * (indent + 2) # Format each choice using format_value for consistency formatted_choices = [] @@ -165,13 +228,29 @@ def _format_categorical( choice_str = format_value(choice, indent + 2, style) formatted_choices.append(choice_str) - # Build the string with consistent indentation - choice_indent_str = style.indent_str * (indent + 2) - choices_str = f",\n{choice_indent_str}".join(formatted_choices) - result = ( - f"Categorical(\n{inner_indent_str}choices=" - f"(\n{choice_indent_str}{choices_str}\n{inner_indent_str})" - ) + # Check if all choices are 
simple (strings or numbers without newlines) + all_simple = all("\n" not in choice_str for choice_str in formatted_choices) + + if all_simple and formatted_choices: + # Try to fit choices on one line + choices_str = ", ".join(formatted_choices) + if len(choices_str) <= style.max_line_length: + # Put choices on own line, indented + result = f"Categorical(\n{inner_indent_str}choices=({choices_str})" + else: + # Put on multiple lines but keep choices together + choices_str = f",\n{choice_indent_str}".join(formatted_choices) + result = ( + f"Categorical(\n{inner_indent_str}choices=(\n" + f"{choice_indent_str}{choices_str})" + ) + else: + # Complex choices - use multi-line format + choices_str = f",\n{choice_indent_str}".join(formatted_choices) + result = ( + f"Categorical(\n{inner_indent_str}choices=(\n" + f"{choice_indent_str}{choices_str}\n{inner_indent_str})" + ) if categorical.has_prior: prior_confidence_str = _format_prior_confidence(categorical._prior_confidence) @@ -181,7 +260,7 @@ def _format_categorical( ) result += f"\n{indent_str})" - return result + return _collapse_closing_brackets(result) def _format_float( @@ -233,7 +312,8 @@ def _format_resampled( if "\n" in source_str: indent_str = style.indent_str * indent inner_indent_str = style.indent_str * (indent + 1) - return f"Resample(\n{inner_indent_str}{source_str}\n{indent_str})" + result = f"Resample(\n{inner_indent_str}{source_str}\n{indent_str})" + return _collapse_closing_brackets(result) # Simple single-line format for basic types return f"Resample({source_str})" @@ -260,53 +340,56 @@ def _format_sequence( if not seq: return "[]" if isinstance(seq, list) else "()" - # Try compact format first + # Format all items + formatted_items = [format_value(item, indent + 1, style) for item in seq] + + # Check for "Nx" shorthand case (all items identical) + if len(set(formatted_items)) == 1 and len(seq) > 1: + return f"{len(seq)}x {formatted_items[0]}" + + # Try compact format for simple sequences compact = 
repr(seq) if len(compact) <= style.compact_threshold and "\n" not in compact: return compact - # Use expanded format for complex sequences + # Expand multi-line or complex sequences is_list = isinstance(seq, list) bracket_open, bracket_close = ("[", "]") if is_list else ("(", ")") - indent_str = style.indent_str * indent inner_indent_str = style.indent_str * (indent + 1) - # Check if any element is an Operation (needs expansion) - has_operations = any(isinstance(item, Operation) for item in seq) + # Check if expansion is needed (Operations or multi-line items) + needs_expansion = any( + isinstance(item, Operation) or "\n" in item_str + for item, item_str in zip(seq, formatted_items, strict=False) + ) - if has_operations: - # Full expansion with each item on its own line + if needs_expansion: + # Full expansion: each item on its own line lines = [bracket_open] - for item in seq: - formatted = format_value(item, indent + 1, style) - lines.append(f"{inner_indent_str}{formatted},") + lines.extend(f"{inner_indent_str}{item}," for item in formatted_items) lines.append(f"{indent_str}{bracket_close}") - return "\n".join(lines) - - # Simple items - try to fit multiple per line - lines = [bracket_open] - current_line: list[str] = [] - current_length = 0 - - for item in seq: - item_repr = repr(item) - item_len = len(item_repr) + 2 # +2 for ", " + else: + # Compact expansion: fit multiple items per line + lines = [bracket_open] + current_line: list[str] = [] + current_length = 0 + + for item_str in formatted_items: + item_len = len(item_str) + 2 # +2 for ", " + if current_line and current_length + item_len > style.max_line_length: + lines.append(f"{inner_indent_str}{', '.join(current_line)},") + current_line, current_length = [item_str], len(item_str) + else: + current_line.append(item_str) + current_length += item_len - if current_line and current_length + item_len > style.max_line_length: - # Start new line + if current_line: lines.append(f"{inner_indent_str}{', 
'.join(current_line)},") - current_line = [item_repr] - current_length = len(item_repr) - else: - current_line.append(item_repr) - current_length += item_len - - if current_line: - lines.append(f"{inner_indent_str}{', '.join(current_line)},") + lines.append(f"{indent_str}{bracket_close}") - lines.append(f"{indent_str}{bracket_close}") - return "\n".join(lines) + result = "\n".join(lines) + return _collapse_closing_brackets(result) def _format_dict( From 2f1333bdf0fb9a95e114c1c4bd5369801875b1de Mon Sep 17 00:00:00 2001 From: Meganton Date: Wed, 3 Dec 2025 17:48:17 +0100 Subject: [PATCH 144/156] fix: Improve error handling for args and kwargs in _format_operation function --- neps/space/neps_spaces/sampling.py | 2 +- neps/space/neps_spaces/string_formatter.py | 46 +++++++++++++++------- tests/test_state/test_neps_state.py | 3 ++ 3 files changed, 35 insertions(+), 16 deletions(-) diff --git a/neps/space/neps_spaces/sampling.py b/neps/space/neps_spaces/sampling.py index 36d228dc1..5f1136a8e 100644 --- a/neps/space/neps_spaces/sampling.py +++ b/neps/space/neps_spaces/sampling.py @@ -193,7 +193,7 @@ def __call__( # noqa: C901, PLR0912 max_index = int(domain_obj.range_compatibility_identifier) - 1 # type: ignore[attr-defined] print( f"Please provide an index for '{current_path}'\n" - f"Choices:{choices_list}\n" + f"Choices:\n{choices_list}\n" f"Valid range: [0, {max_index}]: " ) diff --git a/neps/space/neps_spaces/string_formatter.py b/neps/space/neps_spaces/string_formatter.py index b367a8ad0..e644eb0ad 100644 --- a/neps/space/neps_spaces/string_formatter.py +++ b/neps/space/neps_spaces/string_formatter.py @@ -431,30 +431,46 @@ def _format_operation( else operation.operator.__name__ ) - # Check if we have any args or kwargs - has_args = bool(operation.args) - has_kwargs = bool(operation.kwargs) + # Helper to safely get args/kwargs, handling unresolved Resolvables + def safe_get_args() -> tuple[Any, ...]: + """Get args, handling unresolved Resolvables by wrapping if 
needed.""" + try: + return operation.args + except ValueError: + # Args contain unresolved Resolvables, use raw _args + args = operation._args + # Wrap single Resolvable in tuple for iteration + return (args,) if not isinstance(args, tuple | list) else args + + def safe_get_kwargs() -> dict[str, Any]: + """Get kwargs, handling unresolved Resolvables.""" + try: + return dict(operation.kwargs) + except ValueError: + # Kwargs contain unresolved Resolvables, skip or use raw _kwargs + kwargs = operation._kwargs + return kwargs if isinstance(kwargs, dict) else {} + + # Check if we have any content + has_args = bool(safe_get_args()) + has_kwargs = bool(safe_get_kwargs()) if not (has_args or has_kwargs): return f"{operator_name}()" if style.show_empty_args else operator_name - # Always use multi-line format for consistency + # Format with multi-line layout indent_str = style.indent_str * indent inner_indent_str = style.indent_str * (indent + 1) lines = [f"{operator_name}("] - # Format args using format_value - if has_args: - for arg in operation.args: - formatted = format_value(arg, indent + 1, style) - lines.append(f"{inner_indent_str}{formatted},") - - # Format kwargs using format_value - if has_kwargs: - for key, value in operation.kwargs.items(): - formatted_value = format_value(value, indent + 1, style) - lines.append(f"{inner_indent_str}{key}={formatted_value},") + for arg in safe_get_args(): + formatted = format_value(arg, indent + 1, style) + lines.append(f"{inner_indent_str}{formatted},") + + for key, value in safe_get_kwargs().items(): + formatted_value = format_value(value, indent + 1, style) + lines.append(f"{inner_indent_str}{key}={formatted_value},") lines.append(f"{indent_str})") diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index c2aa2f5f4..9baba27a1 100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -107,6 +107,7 @@ class SpaceFidPrior(PipelineSpace): "pibo", 
"neps_random_search", "complex_random_search", + "neps_regularized_evolution", ] NO_DEFAULT_PRIOR_SUPPORT = [ "grid_search", @@ -121,6 +122,7 @@ class SpaceFidPrior(PipelineSpace): "mo_hyperband", "neps_random_search", "complex_random_search", + "neps_regularized_evolution", ] REQUIRES_PRIOR = [ "pibo", @@ -141,6 +143,7 @@ class SpaceFidPrior(PipelineSpace): "neps_random_search", "complex_random_search", "neps_hyperband", + "neps_regularized_evolution", ] From 5beb7f63d5c1e178490f3ec566946805d9c198d4 Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 4 Dec 2025 11:12:31 +0100 Subject: [PATCH 145/156] fix: Update kernel_size in test_list_as_arg for Conv2d operation --- tests/test_neps_space/test_string_formatter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_neps_space/test_string_formatter.py b/tests/test_neps_space/test_string_formatter.py index e0d9f9f69..37657a276 100644 --- a/tests/test_neps_space/test_string_formatter.py +++ b/tests/test_neps_space/test_string_formatter.py @@ -103,10 +103,10 @@ def test_deeply_nested_operations(): def test_list_as_arg(): """Test formatting with a list as an argument.""" - op = Operation(operator="Conv2d", kwargs={"kernel_size": [3, 3]}) + op = Operation(operator="Conv2d", kwargs={"kernel_size": [3, 4]}) result = format_value(op) expected = """Conv2d( - kernel_size=[3, 3], + kernel_size=[3, 4], )""" assert result == expected From de1d4caf8dbae64fa92e5e7b1fb2068663e0ccdb Mon Sep 17 00:00:00 2001 From: Meganton Date: Thu, 4 Dec 2025 12:02:24 +0100 Subject: [PATCH 146/156] fix: Update kernel_size in Conv2d operations from [3, 3] to [3, 4] --- tests/test_neps_space/test_string_formatter.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_neps_space/test_string_formatter.py b/tests/test_neps_space/test_string_formatter.py index 37657a276..ac822e01b 100644 --- a/tests/test_neps_space/test_string_formatter.py +++ 
b/tests/test_neps_space/test_string_formatter.py @@ -229,7 +229,7 @@ def test_complex_nested_structure(): conv = Operation( operator="Conv2d", - kwargs={"in_channels": 3, "out_channels": 64, "kernel_size": [3, 3]}, + kwargs={"in_channels": 3, "out_channels": 64, "kernel_size": [3, 4]}, ) relu = Operation(operator="ReLU") @@ -245,7 +245,7 @@ def test_complex_nested_structure(): assert "Sequential(" in result assert "Conv2d(" in result assert "in_channels=3" in result - assert "kernel_size=[3, 3]" in result + assert "kernel_size=[3, 4]" in result assert "ReLU()," in result assert "dropout=0.5" in result assert "config=" in result @@ -334,14 +334,14 @@ def test_real_world_example(): # Build a realistic example similar to architecture_search.py conv1 = Operation( operator="Conv2d", - kwargs={"in_channels": 3, "out_channels": 64, "kernel_size": [3, 3]}, + kwargs={"in_channels": 3, "out_channels": 64, "kernel_size": [3, 4]}, ) relu1 = Operation(operator="ReLU") pool1 = Operation(operator="MaxPool2d", kwargs={"kernel_size": 2, "stride": 2}) conv2 = Operation( operator="Conv2d", - kwargs={"in_channels": 64, "out_channels": 128, "kernel_size": [3, 3]}, + kwargs={"in_channels": 64, "out_channels": 128, "kernel_size": [3, 4]}, ) relu2 = Operation(operator="ReLU") pool2 = Operation(operator="MaxPool2d", kwargs={"kernel_size": 2, "stride": 2}) @@ -363,7 +363,7 @@ def test_real_world_example(): assert "Conv2d(" in result assert "in_channels=3" in result assert "out_channels=64" in result - assert "kernel_size=[3, 3]" in result + assert "kernel_size=[3, 4]" in result assert "ReLU()," in result assert "MaxPool2d(" in result assert "Flatten()," in result From c9bca14e94fb3ea7bafb16a02b144515ea6bedfe Mon Sep 17 00:00:00 2001 From: Meganton Date: Sat, 6 Dec 2025 22:07:31 +0100 Subject: [PATCH 147/156] feat: Add LocalAndIncumbent optimizer and integrate with bracket optimizer --- neps/optimizers/algorithms.py | 53 +++++++- neps/optimizers/neps_bracket_optimizer.py | 5 +- 
neps/optimizers/neps_local_and_incumbent.py | 136 ++++++++++++++++++++ 3 files changed, 189 insertions(+), 5 deletions(-) create mode 100644 neps/optimizers/neps_local_and_incumbent.py diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 81c069f00..c5e0b9173 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -35,6 +35,7 @@ from neps.optimizers.models.ftpfn import FTPFNSurrogate from neps.optimizers.mopriors import MOPriorSampler from neps.optimizers.neps_bracket_optimizer import _NePSBracketOptimizer +from neps.optimizers.neps_local_and_incumbent import NePSLocalPriorIncumbentSampler from neps.optimizers.neps_priorband import NePSPriorBandSampler from neps.optimizers.neps_random_search import ( NePSComplexRandomSearch, @@ -1626,15 +1627,16 @@ def neps_random_search( ) -def _neps_bracket_optimizer( +def _neps_bracket_optimizer( # noqa: C901 pipeline_space: PipelineSpace, *, bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], eta: int, - sampler: Literal["priorband", "uniform", "prior"], + sampler: Literal["priorband", "uniform", "prior", "local_and_incumbent"], sample_prior_first: bool | Literal["highest_fidelity"], early_stopping_rate: int | None, - inc_ratio: float = 0.9, + inc_ratio: float | None = 0.9, + local_prior: dict[str, Any] | None = None, ) -> _NePSBracketOptimizer: fidelity_attrs = pipeline_space.fidelity_attrs @@ -1709,9 +1711,10 @@ def _neps_bracket_optimizer( case _: raise ValueError(f"Unknown bracket type: {bracket_type}") - _sampler: NePSPriorBandSampler | DomainSampler + _sampler: NePSPriorBandSampler | DomainSampler | NePSLocalPriorIncumbentSampler match sampler: case "priorband": + assert inc_ratio is not None _sampler = NePSPriorBandSampler( space=pipeline_space, eta=eta, @@ -1721,6 +1724,12 @@ def _neps_bracket_optimizer( fid_bounds=(fidelity_obj.lower, fidelity_obj.upper), inc_ratio=inc_ratio, ) + case "local_and_incumbent": + assert local_prior is not None + 
_sampler = NePSLocalPriorIncumbentSampler( + space=pipeline_space, + local_prior=local_prior, + ) case "uniform": _sampler = RandomSampler({}) case "prior": @@ -1804,6 +1813,40 @@ def neps_regularized_evolution( ) +def neps_local_and_incumbent( + pipeline_space: PipelineSpace, + *, + eta: int = 3, + base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", +) -> _NePSBracketOptimizer: + """Create a LocalAndIncumbent optimizer for the given pipeline space. + + Args: + pipeline_space: The pipeline space to optimize over. + base: The type of bracket optimizer to use. One of: + - "successive_halving" + - "hyperband" + - "asha" + - "async_hb" + Returns: + An instance of _BracketOptimizer configured for LocalAndIncumbent sampling. + """ + if pipeline_space.has_priors(): + logger.warning( + "Warning: Priors are defined in the search space, but LocalAndIncumbent does" + " not use them." + ) + return _neps_bracket_optimizer( + pipeline_space=pipeline_space, + bracket_type=base, + eta=eta, + sampler="local_and_incumbent", + sample_prior_first=False, + early_stopping_rate=0 if base in ("successive_halving", "asha") else None, + inc_ratio=None, + ) + + PredefinedOptimizers: Mapping[str, Any] = { f.__name__: f for f in ( @@ -1825,6 +1868,7 @@ def neps_regularized_evolution( neps_priorband, neps_hyperband, neps_regularized_evolution, + neps_local_and_incumbent, ) } @@ -1847,4 +1891,5 @@ def neps_regularized_evolution( "neps_priorband", "neps_hyperband", "neps_regularized_evolution", + "neps_local_and_incumbent", ] diff --git a/neps/optimizers/neps_bracket_optimizer.py b/neps/optimizers/neps_bracket_optimizer.py index 654426905..eb0270d2f 100644 --- a/neps/optimizers/neps_bracket_optimizer.py +++ b/neps/optimizers/neps_bracket_optimizer.py @@ -16,6 +16,7 @@ import pandas as pd import neps.optimizers.bracket_optimizer as standard_bracket_optimizer +from neps.optimizers.neps_local_and_incumbent import NePSLocalPriorIncumbentSampler from 
neps.optimizers.neps_priorband import NePSPriorBandSampler from neps.optimizers.optimizer import ImportedConfig, SampledConfig from neps.optimizers.utils.brackets import PromoteAction, SampleAction @@ -65,7 +66,7 @@ class _NePSBracketOptimizer: create_brackets: Callable[[pd.DataFrame], Sequence[Bracket] | Bracket] """The sampler used to generate new trials.""" - sampler: NePSPriorBandSampler | DomainSampler + sampler: NePSPriorBandSampler | DomainSampler | NePSLocalPriorIncumbentSampler """The name of the fidelity in the space.""" fid_name: str @@ -145,6 +146,8 @@ def __call__( # noqa: C901, PLR0912 case SampleAction(rung=rung): if isinstance(self.sampler, NePSPriorBandSampler): config = self.sampler.sample_config(table, rung=rung) + elif isinstance(self.sampler, NePSLocalPriorIncumbentSampler): + config = self.sampler.sample_config(table) elif isinstance(self.sampler, DomainSampler): environment_values = {} fidelity_attrs = self.space.fidelity_attrs diff --git a/neps/optimizers/neps_local_and_incumbent.py b/neps/optimizers/neps_local_and_incumbent.py new file mode 100644 index 000000000..a656e32f5 --- /dev/null +++ b/neps/optimizers/neps_local_and_incumbent.py @@ -0,0 +1,136 @@ +from __future__ import annotations + +import random +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Literal + +import neps.space.neps_spaces.sampling +from neps.space.neps_spaces import neps_space, sampling + +if TYPE_CHECKING: + import pandas as pd + + from neps.space.neps_spaces.parameters import PipelineSpace + + +@dataclass +class NePSLocalPriorIncumbentSampler: + """Implement a sampler that samples from the incumbent.""" + + space: PipelineSpace + """The pipeline space to optimize over.""" + + local_prior: dict[str, Any] + """The first config to sample.""" + + inc_takeover_mode: Literal[0, 1, 2, 3] = 0 + """The incumbent takeover mode. + 0: Always mutate the first config. + 1: Use the global incumbent. 
+ 2: Crossover between global incumbent and first config. + 3: Choose randomly between 0, 1, and 2. + """ + + def sample_config(self, table: pd.DataFrame) -> dict[str, Any]: + """Sample a configuration based on the PriorBand algorithm. + + Args: + table (pd.DataFrame): The table containing the configurations and their + performance. + rung (int): The current rung of the optimization. + + Returns: + dict[str, Any]: A sampled configuration. + """ + + completed: pd.DataFrame = table[table["perf"].notna()] # type: ignore + if completed.empty: + return self.local_prior + + # Get the incumbent configuration + inc_config = completed.loc[completed["perf"].idxmin()]["config"] + first_config = self.local_prior + assert isinstance(inc_config, dict) + + match self.inc_takeover_mode: + case 0: + # Always mutate the first config. + new_config = self._mutate_inc(inc_config=first_config) + case 1: + # Use the global incumbent. + new_config = self._mutate_inc(inc_config=inc_config) + case 2: + # Crossover between global incumbent and first config. + new_config = self._crossover_incs( + inc_config=inc_config, + first_config=first_config, + ) + case 3: + # Choose randomly between 0, 1, and 2. + match random.randint(0, 2): + case 0: + new_config = self._mutate_inc(inc_config=first_config) + case 1: + new_config = self._mutate_inc(inc_config=inc_config) + case 2: + new_config = self._crossover_incs( + inc_config=inc_config, + first_config=first_config, + ) + case _: + raise ValueError( + "This should never happen. Only for type checking." 
+ ) + case _: + raise ValueError(f"Invalid inc_takeover_mode: {self.inc_takeover_mode}") + return new_config + + def _mutate_inc(self, inc_config: dict[str, Any]) -> dict[str, Any]: + data = neps_space.NepsCompatConverter.from_neps_config(config=inc_config) + + _resolved_pipeline, resolution_context = neps_space.resolve( + pipeline=self.space, + domain_sampler=neps.space.neps_spaces.sampling.MutatateUsingCentersSampler( + predefined_samplings=data.predefined_samplings, + n_mutations=max(1, random.randint(1, int(len(inc_config) / 2))), + ), + environment_values=data.environment_values, + ) + + config = neps_space.NepsCompatConverter.to_neps_config(resolution_context) + return dict(**config) + + def _crossover_incs( + self, inc_config: dict[str, Any], first_config: dict[str, Any] + ) -> dict[str, Any]: + _environment_values = {} + _fidelity_attrs = self.space.fidelity_attrs + for fidelity_name, fidelity_obj in _fidelity_attrs.items(): + _environment_values[fidelity_name] = fidelity_obj.upper + + # Crossover between the best two trials' configs to create a new config. + try: + crossover_sampler = sampling.CrossoverByMixingSampler( + predefined_samplings_1=inc_config, + predefined_samplings_2=first_config, + prefer_first_probability=0.5, + ) + _resolved_pipeline, resolution_context = neps_space.resolve( + pipeline=self.space, + domain_sampler=crossover_sampler, + environment_values=_environment_values, + ) + except sampling.CrossoverNotPossibleError: + # A crossover was not possible for them. Increase configs and try again. + # If we have tried all crossovers, mutate the best instead. + # Mutate 50% of the top trial's config. 
+ _resolved_pipeline, resolution_context = neps_space.resolve( + pipeline=self.space, + domain_sampler=sampling.MutatateUsingCentersSampler( + predefined_samplings=inc_config, + n_mutations=max(1, int(len(inc_config) / 2)), + ), + environment_values=_environment_values, + ) + config = neps_space.NepsCompatConverter.to_neps_config(resolution_context) + return dict(**config) From 25bab2d661a71f10ed546c46535138f843401665 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sat, 6 Dec 2025 23:37:07 +0100 Subject: [PATCH 148/156] feat: Add local_prior parameter to neps_local_and_incumbent function --- neps/optimizers/algorithms.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index c5e0b9173..80ae5a6d6 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -1816,6 +1816,7 @@ def neps_regularized_evolution( def neps_local_and_incumbent( pipeline_space: PipelineSpace, *, + local_prior: dict[str, Any], eta: int = 3, base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", ) -> _NePSBracketOptimizer: @@ -1844,6 +1845,7 @@ def neps_local_and_incumbent( sample_prior_first=False, early_stopping_rate=0 if base in ("successive_halving", "asha") else None, inc_ratio=None, + local_prior=local_prior, ) From ffb9160902395712299bc1e137f87120ad4e7333 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sat, 6 Dec 2025 23:43:42 +0100 Subject: [PATCH 149/156] feat: Add inc_takeover_mode parameter to _neps_bracket_optimizer and neps_local_and_incumbent functions --- neps/optimizers/algorithms.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 80ae5a6d6..fd0c017c5 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -1637,6 +1637,7 @@ def _neps_bracket_optimizer( # noqa: C901 early_stopping_rate: int | None, inc_ratio: float | None = 0.9, local_prior: dict[str, Any] | None = None, 
+ inc_takeover_mode: Literal[0, 1, 2, 3] | None = None, ) -> _NePSBracketOptimizer: fidelity_attrs = pipeline_space.fidelity_attrs @@ -1726,9 +1727,11 @@ def _neps_bracket_optimizer( # noqa: C901 ) case "local_and_incumbent": assert local_prior is not None + assert inc_takeover_mode is not None _sampler = NePSLocalPriorIncumbentSampler( space=pipeline_space, local_prior=local_prior, + inc_takeover_mode=inc_takeover_mode, ) case "uniform": _sampler = RandomSampler({}) @@ -1817,6 +1820,7 @@ def neps_local_and_incumbent( pipeline_space: PipelineSpace, *, local_prior: dict[str, Any], + inc_takeover_mode: Literal[0, 1, 2, 3] = 0, eta: int = 3, base: Literal["successive_halving", "hyperband", "asha", "async_hb"] = "hyperband", ) -> _NePSBracketOptimizer: @@ -1846,6 +1850,7 @@ def neps_local_and_incumbent( early_stopping_rate=0 if base in ("successive_halving", "asha") else None, inc_ratio=None, local_prior=local_prior, + inc_takeover_mode=inc_takeover_mode, ) From 1961ce84b0ad5f84cd987c62efd3012d0b7f2317 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 7 Dec 2025 00:09:35 +0100 Subject: [PATCH 150/156] fix: Update _mutate_inc method to use fidelity attributes for environment values --- neps/optimizers/neps_local_and_incumbent.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/neps/optimizers/neps_local_and_incumbent.py b/neps/optimizers/neps_local_and_incumbent.py index a656e32f5..92328bcf8 100644 --- a/neps/optimizers/neps_local_and_incumbent.py +++ b/neps/optimizers/neps_local_and_incumbent.py @@ -86,15 +86,18 @@ def sample_config(self, table: pd.DataFrame) -> dict[str, Any]: return new_config def _mutate_inc(self, inc_config: dict[str, Any]) -> dict[str, Any]: - data = neps_space.NepsCompatConverter.from_neps_config(config=inc_config) + _environment_values = {} + _fidelity_attrs = self.space.fidelity_attrs + for fidelity_name, fidelity_obj in _fidelity_attrs.items(): + _environment_values[fidelity_name] = fidelity_obj.upper 
_resolved_pipeline, resolution_context = neps_space.resolve( pipeline=self.space, domain_sampler=neps.space.neps_spaces.sampling.MutatateUsingCentersSampler( - predefined_samplings=data.predefined_samplings, + predefined_samplings=inc_config, n_mutations=max(1, random.randint(1, int(len(inc_config) / 2))), ), - environment_values=data.environment_values, + environment_values=_environment_values, ) config = neps_space.NepsCompatConverter.to_neps_config(resolution_context) From 34fb82321dbc8f7b28fbffb41554adc354ea0c38 Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 7 Dec 2025 01:15:27 +0100 Subject: [PATCH 151/156] fix: Update load_config to require either config or config_path and improve error handling --- neps/api.py | 240 ++++++++++++++++++++++++++++------------------------ 1 file changed, 129 insertions(+), 111 deletions(-) diff --git a/neps/api.py b/neps/api.py index b4f4ee2b7..9aa4f1f60 100644 --- a/neps/api.py +++ b/neps/api.py @@ -789,16 +789,18 @@ def create_config( # noqa: C901 def load_config( # noqa: C901, PLR0912, PLR0915 - config_path: Path | str, pipeline_space: PipelineSpace | SearchSpace | None = None, + config: dict[str, Any] | None = None, + config_path: Path | str | None = None, config_id: str | None = None, ) -> dict[str, Any]: """Load a configuration from a neps config file. Args: - config_path: Path to the neps config file. pipeline_space: The pipeline space used to generate the configuration. If None, will attempt to load from the NePSState directory. + config: Optional configuration dictionary to return directly. + config_path: Path to the neps config file. config_id: Optional config id to load, when only giving results folder. 
Returns: @@ -810,112 +812,126 @@ def load_config( # noqa: C901, PLR0912, PLR0915 from neps.space.neps_spaces.neps_space import NepsCompatConverter from neps.space.neps_spaces.sampling import OnlyPredefinedValuesSampler - # Try to load pipeline_space from NePSState if not provided - state = None # Track state for later use in config loading - - if pipeline_space is None: - try: - # Extract the root directory from config_path - str_path_temp = str(config_path) - if "/configs/" in str_path_temp or "\\configs\\" in str_path_temp: - root_dir = Path( - str_path_temp.split("/configs/")[0].split("\\configs\\")[0] - ) - # If no /configs/ in path, assume it's either: - # 1. The root directory itself - # 2. A direct config file path (ends with .yaml/.yml) - elif str_path_temp.endswith((".yaml", ".yml")): - # It's a direct config file path, go up two levels - root_dir = Path(str_path_temp).parent.parent - else: - # It's the root directory itself - root_dir = Path(str_path_temp) - - state = NePSState.create_or_load(path=root_dir, load_only=True) - pipeline_space = state.lock_and_get_search_space() + if config is None: + if config_path is None: + raise ValueError("Either config or config_path must be provided.") + # Try to load pipeline_space from NePSState if not provided + state = None # Track state for later use in config loading - if pipeline_space is None: + if pipeline_space is None: + try: + # Extract the root directory from config_path + str_path_temp = str(config_path) + if "/configs/" in str_path_temp or "\\configs\\" in str_path_temp: + root_dir = Path( + str_path_temp.split("/configs/")[0].split("\\configs\\")[0] + ) + # If no /configs/ in path, assume it's either: + # 1. The root directory itself + # 2. 
A direct config file path (ends with .yaml/.yml) + elif str_path_temp.endswith((".yaml", ".yml")): + # It's a direct config file path, go up two levels + root_dir = Path(str_path_temp).parent.parent + else: + # It's the root directory itself + root_dir = Path(str_path_temp) + + state = NePSState.create_or_load(path=root_dir, load_only=True) + pipeline_space = state.lock_and_get_search_space() + + if pipeline_space is None: + raise ValueError( + "Could not load pipeline_space from disk. " + "Please provide pipeline_space argument or ensure " + "the NePSState was created with search_space saved." + ) + except Exception as e: raise ValueError( - "Could not load pipeline_space from disk. " - "Please provide pipeline_space argument or ensure " - "the NePSState was created with search_space saved." - ) - except Exception as e: - raise ValueError( - f"pipeline_space not provided and could not be loaded from disk: {e}" - ) from e - else: - # User provided a pipeline_space - validate it matches the one on disk - from neps.exceptions import NePSError + f"pipeline_space not provided and could not be loaded from disk: {e}" + ) from e + else: + # User provided a pipeline_space - validate it matches the one on disk + from neps.exceptions import NePSError - try: - str_path_temp = str(config_path) - if "/configs/" in str_path_temp or "\\configs\\" in str_path_temp: - root_dir = Path( - str_path_temp.split("/configs/")[0].split("\\configs\\")[0] - ) - # If no /configs/ in path, assume it's either: - # 1. The root directory itself - # 2. A direct config file path (ends with .yaml/.yml) - elif str_path_temp.endswith((".yaml", ".yml")): - # It's a direct config file path, go up two levels - root_dir = Path(str_path_temp).parent.parent + try: + str_path_temp = str(config_path) + if "/configs/" in str_path_temp or "\\configs\\" in str_path_temp: + root_dir = Path( + str_path_temp.split("/configs/")[0].split("\\configs\\")[0] + ) + # If no /configs/ in path, assume it's either: + # 1. 
The root directory itself + # 2. A direct config file path (ends with .yaml/.yml) + elif str_path_temp.endswith((".yaml", ".yml")): + # It's a direct config file path, go up two levels + root_dir = Path(str_path_temp).parent.parent + else: + # It's the root directory itself + root_dir = Path(str_path_temp) + + state = NePSState.create_or_load(path=root_dir, load_only=True) + disk_space = state.lock_and_get_search_space() + + if disk_space is not None: + # Validate that provided space matches disk space + import pickle + + if pickle.dumps(disk_space) != pickle.dumps(pipeline_space): + raise NePSError( + "The pipeline_space provided does not match the one saved on" + " disk.\\nPipeline space location:" + f" {root_dir / 'pipeline_space.pkl'}\\nPlease either:\\n 1." + " Don't provide pipeline_space (it will be loaded" + " automatically), or\\n 2. Provide the same pipeline_space" + " that was used in neps.run()" + ) + except NePSError: + raise + except Exception: # noqa: S110, BLE001 + # If we can't load/validate, just continue with provided space + pass + + # Determine config_id from path + str_path = str(config_path) + trial_id = None + + if not str_path.endswith(".yaml") and not str_path.endswith(".yml"): + if str_path.removesuffix("/").split("/")[-1].startswith("config_"): + # Extract trial_id from path like "configs/config_1" + # or "configs/config_1_rung_0" + trial_id = str_path.removesuffix("/").split("/")[-1] else: - # It's the root directory itself - root_dir = Path(str_path_temp) - - state = NePSState.create_or_load(path=root_dir, load_only=True) - disk_space = state.lock_and_get_search_space() - - if disk_space is not None: - # Validate that provided space matches disk space - import pickle - - if pickle.dumps(disk_space) != pickle.dumps(pipeline_space): - raise NePSError( - "The pipeline_space provided does not match the one saved on" - " disk.\\nPipeline space location:" - f" {root_dir / 'pipeline_space.pkl'}\\nPlease either:\\n 1." 
- " Don't provide pipeline_space (it will be loaded" - " automatically), or\\n 2. Provide the same pipeline_space that" - " was used in neps.run()" + if config_id is None: + raise ValueError( + "When providing a results folder, you must also provide a" + " config_id." ) - except NePSError: - raise - except Exception: # noqa: S110, BLE001 - # If we can't load/validate, just continue with provided space - pass - - # Determine config_id from path - str_path = str(config_path) - trial_id = None - - if not str_path.endswith(".yaml") and not str_path.endswith(".yml"): - if str_path.removesuffix("/").split("/")[-1].startswith("config_"): - # Extract trial_id from path like "configs/config_1" - # or "configs/config_1_rung_0" - trial_id = str_path.removesuffix("/").split("/")[-1] + trial_id = config_id else: - if config_id is None: - raise ValueError( - "When providing a results folder, you must also provide a config_id." - ) - trial_id = config_id - else: - # Extract trial_id from yaml path like "configs/config_1/config.yaml" - path_parts = str_path.replace("\\", "/").split("/") - for i, part in enumerate(path_parts): - if part == "configs" and i + 1 < len(path_parts): - trial_id = path_parts[i + 1] - break - - # Use the locked method from NePSState to safely read the trial - if trial_id is not None and state is not None: - try: - trial = state.lock_and_get_trial_by_id(trial_id) - config_dict = dict(trial.config) # Convert Mapping to dict - except Exception: # noqa: BLE001 - # Fallback to direct file read if trial can't be loaded + # Extract trial_id from yaml path like "configs/config_1/config.yaml" + path_parts = str_path.replace("\\", "/").split("/") + for i, part in enumerate(path_parts): + if part == "configs" and i + 1 < len(path_parts): + trial_id = path_parts[i + 1] + break + + # Use the locked method from NePSState to safely read the trial + if trial_id is not None and state is not None: + try: + trial = state.lock_and_get_trial_by_id(trial_id) + config_dict = 
dict(trial.config) # Convert Mapping to dict + except Exception: # noqa: BLE001 + # Fallback to direct file read if trial can't be loaded + str_path_fallback = str(config_path) + if not str_path_fallback.endswith( + ".yaml" + ) and not str_path_fallback.endswith(".yml"): + str_path_fallback += "/config.yaml" + config_path = Path(str_path_fallback) + with config_path.open("r") as f: + config_dict = yaml.load(f, Loader=yaml.SafeLoader) + else: + # Fallback to direct file read str_path_fallback = str(config_path) if not str_path_fallback.endswith(".yaml") and not str_path_fallback.endswith( ".yml" @@ -924,16 +940,18 @@ def load_config( # noqa: C901, PLR0912, PLR0915 config_path = Path(str_path_fallback) with config_path.open("r") as f: config_dict = yaml.load(f, Loader=yaml.SafeLoader) + else: - # Fallback to direct file read - str_path_fallback = str(config_path) - if not str_path_fallback.endswith(".yaml") and not str_path_fallback.endswith( - ".yml" - ): - str_path_fallback += "/config.yaml" - config_path = Path(str_path_fallback) - with config_path.open("r") as f: - config_dict = yaml.load(f, Loader=yaml.SafeLoader) + config_dict = config + + if ( + any(NepsCompatConverter._SAMPLING_PREFIX in key for key in config_dict) + and pipeline_space is None + ): + raise ValueError( + "The provided NePS-space config requires the correct pipeline_space to be " + "resolved. Please provide a pipeline_space argument to load_config." 
+ ) # Handle different pipeline space types if not isinstance(pipeline_space, PipelineSpace): From 852055bc729eb603e3d91964bef4d3c7278efedd Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 7 Dec 2025 01:38:32 +0100 Subject: [PATCH 152/156] test: Add additional setup requirement for neps_local_and_incumbent optimizer tests --- tests/test_state/test_neps_state.py | 6 ++++++ tests/test_state/test_search_space_validation.py | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index 9baba27a1..32b5ae763 100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -144,8 +144,11 @@ class SpaceFidPrior(PipelineSpace): "complex_random_search", "neps_hyperband", "neps_regularized_evolution", + "neps_local_and_incumbent", ] +REQUIRES_ADDTIONAL_SETUP = ["neps_local_and_incumbent"] + @fixture @parametrize("key", list(PredefinedOptimizers.keys())) @@ -174,6 +177,9 @@ def optimizer_and_key_and_search_space( if key in REQUIRES_MO_PRIOR: pytest.xfail("No tests defined for PriMO yet") + if key in REQUIRES_ADDTIONAL_SETUP: + pytest.xfail(f"{key} requires additional setup not implemented in this test") + kwargs: dict[str, Any] = {} opt, _ = load_optimizer((key, kwargs), search_space) # type: ignore converted_space = ( diff --git a/tests/test_state/test_search_space_validation.py b/tests/test_state/test_search_space_validation.py index 89ff71c52..0ad5e4cb3 100644 --- a/tests/test_state/test_search_space_validation.py +++ b/tests/test_state/test_search_space_validation.py @@ -162,7 +162,7 @@ def test_load_config_with_wrong_space_raises_error(tmp_path: Path): # Try to load with wrong pipeline_space - should raise error with pytest.raises(NePSError, match="pipeline_space provided does not match"): - neps.load_config(config_path, pipeline_space=Space2()) + neps.load_config(config_path=config_path, pipeline_space=Space2()) def 
test_load_config_without_space_auto_loads(tmp_path: Path): @@ -187,7 +187,7 @@ def test_load_config_without_space_auto_loads(tmp_path: Path): config_path = configs[0] / "config.yaml" # Load config without providing space - should auto-load from disk - config = neps.load_config(config_path) + config = neps.load_config(config_path=config_path) assert "x" in config, "Should have x parameter" From 125a23fd9b85757d2e2a4045ac0984562006b02a Mon Sep 17 00:00:00 2001 From: Meganton Date: Sun, 7 Dec 2025 01:40:39 +0100 Subject: [PATCH 153/156] fix: Add xfail for tests requiring additional setup in test_sample_trial --- tests/test_state/test_neps_state.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py index 32b5ae763..d3a13401b 100644 --- a/tests/test_state/test_neps_state.py +++ b/tests/test_state/test_neps_state.py @@ -233,6 +233,9 @@ def test_sample_trial( ) -> None: optimizer, key, search_space = optimizer_and_key_and_search_space + if key in REQUIRES_ADDTIONAL_SETUP: + pytest.xfail(f"{key} requires additional setup not implemented in this test") + assert neps_state.lock_and_read_trials() == {} assert neps_state.lock_and_get_next_pending_trial() is None assert neps_state.lock_and_get_next_pending_trial(n=10) == [] From 6bd092dd272554a5c8fdf7faef52289d8a96ebf2 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 8 Dec 2025 04:36:52 +0100 Subject: [PATCH 154/156] fix: Standardize ignore_fidelity parameter to use "highest_fidelity" across optimizers and related functions feat: Enhance budget logging in DefaultWorker to provide detailed resource usage status --- neps/normalization.py | 31 +------ neps/optimizers/algorithms.py | 91 +++++++++++++------ neps/optimizers/neps_random_search.py | 10 +- neps/optimizers/neps_regularized_evolution.py | 4 +- neps/optimizers/utils/grid.py | 6 +- neps/runtime.py | 70 +++++++++++++- 6 files changed, 142 insertions(+), 70 deletions(-) diff --git 
a/neps/normalization.py b/neps/normalization.py index 3e8ff3f1f..a16a3743a 100644 --- a/neps/normalization.py +++ b/neps/normalization.py @@ -14,7 +14,7 @@ logger = logging.getLogger(__name__) -def _normalize_imported_config( # noqa: C901, PLR0912 +def _normalize_imported_config( space: SearchSpace | PipelineSpace, config: Mapping[str, float] ) -> dict: """Completes a configuration by adding default values for missing fidelities. @@ -39,38 +39,15 @@ def _normalize_imported_config( # noqa: C901, PLR0912 # Import here to avoid circular import from neps.space.neps_spaces.neps_space import ( NepsCompatConverter, - construct_sampling_path, ) - from neps.space.neps_spaces.parameters import Domain - - all_param_keys = set() - - # Add SAMPLING__ prefixed keys for each parameter - for param_name, param_obj in space.get_attrs().items(): - # Construct the sampling path for this parameter - if isinstance(param_obj, Domain): - sampling_path = construct_sampling_path( - path_parts=["Resolvable", param_name], - domain_obj=param_obj, - ) - all_param_keys.add( - f"{NepsCompatConverter._SAMPLING_PREFIX}{sampling_path}" - ) - - # Add ENVIRONMENT__ prefixed keys for fidelities - for fidelity_name in space.fidelity_attrs: - all_param_keys.add( - f"{NepsCompatConverter._ENVIRONMENT_PREFIX}{fidelity_name}" - ) # copy to avoid modifying the original config normalized_conf = dict(config) for key, fid_param in space.fidelity_attrs.items(): - if key not in normalized_conf: - normalized_conf[NepsCompatConverter._ENVIRONMENT_PREFIX + key] = ( - fid_param.upper - ) + fid_key = NepsCompatConverter._ENVIRONMENT_PREFIX + key + if fid_key not in normalized_conf: + normalized_conf[fid_key] = fid_param.upper # For PipelineSpace, filter out keys that match the expected patterns # Import here to avoid circular import (needed for prefix constants) from neps.space.neps_spaces.neps_space import NepsCompatConverter diff --git a/neps/optimizers/algorithms.py b/neps/optimizers/algorithms.py index 
fd0c017c5..acc903e9f 100644 --- a/neps/optimizers/algorithms.py +++ b/neps/optimizers/algorithms.py @@ -101,7 +101,7 @@ def _bo( # noqa: C901, PLR0912 If using `cost`, cost must be provided in the reports of the trials. sample_prior_first: Whether to sample the default configuration first. - ignore_fidelity: Whether to ignore fidelity when sampling. + ignore_fidelity: Whether to ignore_fidelity when sampling. In this case, the max fidelity is always used. device: Device to use for the optimization. reference_point: The reference point to use for multi-objective optimization. @@ -464,7 +464,7 @@ def random_search( pipeline_space: SearchSpace | PipelineSpace, *, use_priors: bool = False, - ignore_fidelity: bool | Literal["highest fidelity"] = False, + ignore_fidelity: bool | Literal["highest_fidelity"] = False, ) -> RandomSearch | NePSRandomSearch: """A simple random search algorithm that samples configurations uniformly at random. @@ -474,8 +474,8 @@ def random_search( Args: pipeline_space: The search space to sample from. use_priors: Whether to use priors when sampling. - ignore_fidelity: Whether to ignore fidelity when sampling. - Setting this to "highest fidelity" will always sample at max fidelity. + ignore_fidelity: Whether to ignore_fidelity when sampling. + Setting this to "highest_fidelity" will always sample at max fidelity. Setting this to True will randomly sample from the fidelity like any other parameter. """ @@ -490,16 +490,16 @@ def random_search( assert ignore_fidelity in ( True, False, - "highest fidelity", - ), "ignore_fidelity should be either True, False or 'highest fidelity'" + "highest_fidelity", + ), "ignore_fidelity should be either True, False or 'highest_fidelity'" if not ignore_fidelity and pipeline_space.fidelity is not None: raise ValueError( "Fidelities are not supported for RandomSearch. 
Consider setting the" " fidelity to a constant value, or setting ignore_fidelity to True to sample" - " from it like any other parameter or 'highest fidelity' to always sample at" + " from it like any other parameter or 'highest_fidelity' to always sample at" f" max fidelity. Got fidelity: {pipeline_space.fidelities} " ) - if ignore_fidelity in (True, "highest fidelity") and pipeline_space.fidelity is None: + if ignore_fidelity in (True, "highest_fidelity") and pipeline_space.fidelity is None: logger.warning( "Warning: You are using ignore_fidelity, but no fidelity is defined in the" " search space. Consider setting ignore_fidelity to False." @@ -509,7 +509,7 @@ def random_search( parameters = {**pipeline_space.searchables, **pipeline_space.fidelities} case False: parameters = {**pipeline_space.searchables} - case "highest fidelity": + case "highest_fidelity": parameters = {**pipeline_space.searchables} if use_priors and not any( @@ -544,7 +544,7 @@ def random_search( def grid_search( pipeline_space: SearchSpace | PipelineSpace, *, - ignore_fidelity: bool | Literal["highest fidelity"] = False, + ignore_fidelity: bool | Literal["highest_fidelity"] = False, size_per_numerical_dimension: int = 5, ) -> GridSearch: """A simple grid search algorithm which discretizes the search @@ -552,8 +552,8 @@ def grid_search( Args: pipeline_space: The search space to sample from. - ignore_fidelity: Whether to ignore fidelity when sampling. - Setting this to "highest fidelity" will always sample at max fidelity. + ignore_fidelity: Whether to ignore_fidelity when sampling. + Setting this to "highest_fidelity" will always sample at max fidelity. Setting this to True will make a grid over the fidelity like any other parameter. size_per_numerical_dimension: The number of points to use per numerical @@ -585,7 +585,7 @@ def grid_search( raise ValueError( "Fidelities are not supported for GridSearch natively. 
Consider setting the" " fidelity to a constant value, or setting ignore_fidelity to True to sample" - " from it like any other parameter or 'highest fidelity' to always sample at" + " from it like any other parameter or 'highest_fidelity' to always sample at" f" max fidelity. Got fidelity: {pipeline_space.fidelities} " ) @@ -601,7 +601,7 @@ def grid_search( def neps_grid_search( pipeline_space: PipelineSpace, *, - ignore_fidelity: bool | Literal["highest fidelity"] = False, + ignore_fidelity: bool | Literal["highest_fidelity"] = False, size_per_numerical_dimension: int = 5, ) -> GridSearch: """A simple grid search algorithm which discretizes the search @@ -609,8 +609,8 @@ def neps_grid_search( Args: pipeline_space: The search space to sample from. - ignore_fidelity: Whether to ignore fidelity when sampling. - Setting this to "highest fidelity" will always sample at max fidelity. + ignore_fidelity: Whether to ignore_fidelity when sampling. + Setting this to "highest_fidelity" will always sample at max fidelity. Setting this to True will make a grid over the fidelity like any other parameter. size_per_numerical_dimension: The number of points to use per numerical @@ -634,7 +634,7 @@ def neps_grid_search( raise ValueError( "Fidelities are not supported for GridSearch natively. Consider setting the" " fidelity to a constant value, or setting ignore_fidelity to True to sample" - " from it like any other parameter or 'highest fidelity' to always sample at" + " from it like any other parameter or 'highest_fidelity' to always sample at" f" max fidelity. Got fidelity: {pipeline_space.fidelity_attrs} " ) @@ -842,7 +842,7 @@ def successive_halving( values in the search space. sample_prior_first: Whether to sample the prior configuration first, - and if so, should it be at the highest fidelity level. + and if so, should it be at the highest_fidelity level. 
""" if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) @@ -915,7 +915,7 @@ def hyperband( values in the search space. sample_prior_first: Whether to sample the prior configuration first, - and if so, should it be at the highest fidelity level. + and if so, should it be at the highest_fidelity level. """ if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) @@ -962,7 +962,7 @@ def neps_hyperband( values in the search space. sample_prior_first: Whether to sample the prior configuration first, - and if so, should it be at the highest fidelity level. + and if so, should it be at the highest_fidelity level. """ return _neps_bracket_optimizer( pipeline_space=pipeline_space, @@ -1057,7 +1057,7 @@ def asha( values in the search space. sample_prior_first: Whether to sample the prior configuration first, - and if so, should it be at the highest fidelity. + and if so, should it be at the highest_fidelity. """ if isinstance(pipeline_space, PipelineSpace): converted_space = convert_neps_to_classic_search_space(pipeline_space) @@ -1552,7 +1552,7 @@ def custom( def complex_random_search( pipeline_space: PipelineSpace, *, - ignore_fidelity: bool | Literal["highest fidelity"] = False, + ignore_fidelity: bool | Literal["highest_fidelity"] = False, ) -> NePSComplexRandomSearch: """A complex random search algorithm that samples configurations uniformly at random, but allows for more complex sampling strategies. @@ -1561,7 +1561,7 @@ def complex_random_search( pipeline_space: The search space to sample from. ignore_fidelity: Whether to ignore the fidelity parameter when sampling. If `True`, the algorithm will sample the fidelity like a normal parameter. - If set to `"highest fidelity"`, it will always sample at the highest fidelity. + If set to `"highest_fidelity"`, it will always sample at the highest_fidelity. 
Raises: ValueError: If the pipeline has fidelity attributes and `ignore_fidelity` is set to `False`. Complex random search does not support fidelities by default. @@ -1570,7 +1570,7 @@ def complex_random_search( if pipeline_space.fidelity_attrs and ignore_fidelity is False: raise ValueError( "Complex Random Search does not support fidelities by default." - "Consider using `ignore_fidelity=True` or `highest fidelity`" + "Consider using `ignore_fidelity=True` or `highest_fidelity`" "to always sample at max fidelity." ) if not pipeline_space.fidelity_attrs and ignore_fidelity is not False: @@ -1589,7 +1589,7 @@ def neps_random_search( pipeline_space: PipelineSpace, *, use_priors: bool = False, - ignore_fidelity: bool | Literal["highest fidelity"] = False, + ignore_fidelity: bool | Literal["highest_fidelity"] = False, ) -> NePSRandomSearch: """A simple random search algorithm that samples configurations uniformly at random. @@ -1600,7 +1600,7 @@ def neps_random_search( defined in the search space. ignore_fidelity: Whether to ignore the fidelity parameter when sampling. If `True`, the algorithm will sample the fidelity like a normal parameter. - If set to `"highest fidelity"`, it will always sample at the highest fidelity. + If set to `"highest_fidelity"`, it will always sample at the highest_fidelity. Raises: ValueError: If the pipeline space has fidelity attributes and `ignore_fidelity` is set to `False`. Random search does not support fidelities by default. @@ -1609,7 +1609,7 @@ def neps_random_search( if pipeline_space.fidelity_attrs and ignore_fidelity is False: raise ValueError( "Random Search does not support fidelities by default." - "Consider using `ignore_fidelity=True` or `highest fidelity`" + "Consider using `ignore_fidelity=True` or `highest_fidelity`" "to always sample at max fidelity." 
) if not pipeline_space.fidelity_attrs and ignore_fidelity is not False: @@ -1627,7 +1627,7 @@ def neps_random_search( ) -def _neps_bracket_optimizer( # noqa: C901 +def _neps_bracket_optimizer( # noqa: C901, PLR0915 pipeline_space: PipelineSpace, *, bracket_type: Literal["successive_halving", "hyperband", "asha", "async_hb"], @@ -1671,6 +1671,12 @@ def _neps_bracket_optimizer( # noqa: C901 brackets.Sync.create_repeating, rung_sizes=rung_sizes, ) + rung_fidelity_str = "\n".join( + f"{k}: {v}" for k, v in rung_to_fidelity.items() + ) + logging.info(f"Successive Halving Rung to Fidelity:\n{rung_fidelity_str}") + rung_sizes_str = "\n".join(f"{k}: {v}" for k, v in rung_sizes.items()) + logging.info(f"Successive Halving Rung Sizes:\n{rung_sizes_str}") case "hyperband": assert early_stopping_rate is None @@ -1682,6 +1688,16 @@ def _neps_bracket_optimizer( # noqa: C901 brackets.Hyperband.create_repeating, bracket_layouts=bracket_layouts, ) + rung_fidelity_str = "\n".join( + f"Rung {k}: Fidelity >= {v}" for k, v in rung_to_fidelity.items() + ) + logging.info(f"Hyperband Rung to Fidelity:\n{rung_fidelity_str}") + bracket_layouts_str = "\n\n".join( + f"Bracket {i}\n" + + "\n".join([f"At Rung {k}: {v} configs" for k, v in bracket.items()]) + for i, bracket in enumerate(bracket_layouts) + ) + logging.info(f"Hyperband Bracket Layouts:\n{bracket_layouts_str}") case "asha": assert early_stopping_rate is not None @@ -1695,6 +1711,12 @@ def _neps_bracket_optimizer( # noqa: C901 rungs=list(rung_to_fidelity), eta=eta, ) + rung_fidelity_str = "\n".join( + f"{k}: {v}" for k, v in rung_to_fidelity.items() + ) + logging.info(f"ASHA Rung to Fidelity:\n{rung_fidelity_str}") + rung_sizes_str = "\n".join(f"{k}: {v}" for k, v in _rung_sizes.items()) + logging.info(f"ASHA Rung Sizes:\n{rung_sizes_str}") case "async_hb": assert early_stopping_rate is None @@ -1709,6 +1731,15 @@ def _neps_bracket_optimizer( # noqa: C901 bracket_rungs=bracket_rungs, eta=eta, ) + rung_fidelity_str = "\n".join( + 
f"Rung {k}: Fidelity >= {v}" for k, v in rung_to_fidelity.items() + ) + logging.info(f"Async HB Rung to Fidelity:\n{rung_fidelity_str}") + bracket_rungs_str = "\n\n".join( + f"Bracket {i}\n" + "\n".join([f"At Rung {k}" for k in bracket]) + for i, bracket in enumerate(bracket_rungs) + ) + logging.info(f"Async Hyperband Bracket Rungs:\n{bracket_rungs_str}") case _: raise ValueError(f"Unknown bracket type: {bracket_type}") @@ -1768,7 +1799,7 @@ def neps_priorband( eta: The eta parameter for the algorithm. sample_prior_first: Whether to sample the prior first. If set to `"highest_fidelity"`, the prior will be sampled at the - highest fidelity, otherwise at the lowest fidelity. + highest_fidelity, otherwise at the lowest fidelity. base: The type of bracket optimizer to use. One of: - "successive_halving" - "hyperband" @@ -1802,7 +1833,7 @@ def neps_regularized_evolution( mutation_type: float | Literal["mutate_best", "crossover_top_2"] = 0.5, n_mutations: int | Literal["random", "half"] | None = "random", n_forgets: int | Literal["random", "half"] | None = None, - ignore_fidelity: bool | Literal["highest fidelity"] = False, + ignore_fidelity: bool | Literal["highest_fidelity"] = False, ) -> NePSRegularizedEvolution: return NePSRegularizedEvolution( pipeline=pipeline_space, diff --git a/neps/optimizers/neps_random_search.py b/neps/optimizers/neps_random_search.py index 008bdfc41..4997b2385 100644 --- a/neps/optimizers/neps_random_search.py +++ b/neps/optimizers/neps_random_search.py @@ -46,7 +46,7 @@ def __init__( self, pipeline: PipelineSpace, use_priors: bool = False, # noqa: FBT001, FBT002 - ignore_fidelity: bool | Literal["highest fidelity"] = False, # noqa: FBT002 + ignore_fidelity: bool | Literal["highest_fidelity"] = False, # noqa: FBT002 ): """Initialize the RandomSearch optimizer with a pipeline. 
@@ -61,7 +61,7 @@ def __init__( self._environment_values = {} fidelity_attrs = self._pipeline.fidelity_attrs for fidelity_name, fidelity_obj in fidelity_attrs.items(): - if ignore_fidelity == "highest fidelity": + if ignore_fidelity == "highest_fidelity": self._environment_values[fidelity_name] = fidelity_obj.upper elif not ignore_fidelity: raise ValueError( @@ -182,7 +182,7 @@ class NePSComplexRandomSearch: def __init__( self, pipeline: PipelineSpace, - ignore_fidelity: bool | Literal["highest fidelity"] = False, # noqa: FBT002 + ignore_fidelity: bool | Literal["highest_fidelity"] = False, # noqa: FBT002 ): """Initialize the ComplexRandomSearch optimizer with a pipeline. @@ -197,13 +197,13 @@ def __init__( self._environment_values = {} fidelity_attrs = self._pipeline.fidelity_attrs for fidelity_name, fidelity_obj in fidelity_attrs.items(): - if ignore_fidelity == "highest fidelity": + if ignore_fidelity == "highest_fidelity": self._environment_values[fidelity_name] = fidelity_obj.upper elif not ignore_fidelity: raise ValueError( "ComplexRandomSearch does not support fidelities by default. Consider" " using a different optimizer or setting `ignore_fidelity=True` or" - " `highest fidelity`." + " `highest_fidelity`." ) # Sample randomly from the fidelity bounds. 
elif isinstance(fidelity_obj.domain, Integer): diff --git a/neps/optimizers/neps_regularized_evolution.py b/neps/optimizers/neps_regularized_evolution.py index b544ad26c..71a406a91 100644 --- a/neps/optimizers/neps_regularized_evolution.py +++ b/neps/optimizers/neps_regularized_evolution.py @@ -66,7 +66,7 @@ def __init__( mutation_type: float | Literal["mutate_best", "crossover_top_2"] = 0.5, n_mutations: int | Literal["random", "half"] | None = "random", n_forgets: int | Literal["random", "half"] | None = None, - ignore_fidelity: bool | Literal["highest fidelity"] = False, # noqa: FBT002 + ignore_fidelity: bool | Literal["highest_fidelity"] = False, # noqa: FBT002 ): """Initialize the RegularizedEvolution optimizer with a pipeline. @@ -98,7 +98,7 @@ def __init__( fidelity_attrs = self._pipeline.fidelity_attrs for fidelity_name, fidelity_obj in fidelity_attrs.items(): # If the user specifically asked for the highest fidelity, use that. - if ignore_fidelity == "highest fidelity": + if ignore_fidelity == "highest_fidelity": self._environment_values[fidelity_name] = fidelity_obj.upper # If the user asked to ignore fidelities, sample a value randomly from the # domain. diff --git a/neps/optimizers/utils/grid.py b/neps/optimizers/utils/grid.py index 96730a67b..3fb5991c1 100644 --- a/neps/optimizers/utils/grid.py +++ b/neps/optimizers/utils/grid.py @@ -22,7 +22,7 @@ def make_grid( # noqa: PLR0912, PLR0915, C901 space: SearchSpace | PipelineSpace, *, size_per_numerical_hp: int = 10, - ignore_fidelity: bool | Literal["highest fidelity"] = False, + ignore_fidelity: bool | Literal["highest_fidelity"] = False, ) -> list[dict[str, Any]]: """Get a grid of configurations from the search space. 
@@ -52,7 +52,7 @@ def make_grid( # noqa: PLR0912, PLR0915, C901 case HPOInteger() | HPOFloat(): if hp.is_fidelity: match ignore_fidelity: - case "highest fidelity": + case "highest_fidelity": param_ranges[name] = [hp.upper] continue case True: @@ -88,7 +88,7 @@ def make_grid( # noqa: PLR0912, PLR0915, C901 "Grid search only supports categorical choices as tuples." ) elif isinstance(hp, Fidelity): - if ignore_fidelity == "highest fidelity": # type: ignore[unreachable] + if ignore_fidelity == "highest_fidelity": # type: ignore[unreachable] fid_ranges[name] = [hp.upper] continue if ignore_fidelity is True: diff --git a/neps/runtime.py b/neps/runtime.py index 71344040e..10a9e7195 100644 --- a/neps/runtime.py +++ b/neps/runtime.py @@ -382,11 +382,20 @@ def _calculate_total_resource_usage( # noqa: C901 return usage - def _check_global_stopping_criterion( + def _check_global_stopping_criterion( # noqa: C901 self, trials: Mapping[str, Trial], + log_status: bool = False, # noqa: FBT001, FBT002 ) -> tuple[str | Literal[False], ResourceUsage]: - """Evaluates if any global stopping criterion has been met.""" + """Evaluates if any global stopping criterion has been met. + + Args: + trials: The trials to evaluate the stopping criterion on. + log_status: Whether to log the current status of the budget. + + Returns: + A tuple of (stopping message or False, global resource usage). 
+ """ worker_resource_usage = self._calculate_total_resource_usage( trials, subset_worker_id=self.worker_id, @@ -399,6 +408,60 @@ def _check_global_stopping_criterion( include_in_progress=self.settings.include_in_progress_evaluations_towards_maximum, ) + if log_status: + # Log current budget status + budget_info_parts = [] + if self.settings.evaluations_to_spend is not None: + eval_percentage = int( + ( + worker_resource_usage.evaluations + / self.settings.evaluations_to_spend + ) + * 100 + ) + budget_info_parts.append( + "Evaluations:" + f" {worker_resource_usage.evaluations}/" + f"{self.settings.evaluations_to_spend}" + f" ({eval_percentage}%)" + ) + if self.settings.fidelities_to_spend is not None: + fidelity_percentage = int( + (worker_resource_usage.fidelities / self.settings.fidelities_to_spend) + * 100 + ) + budget_info_parts.append( + "Fidelities:" + f" {worker_resource_usage.fidelities}/" + f"{self.settings.fidelities_to_spend}" + f" ({fidelity_percentage}%)" + ) + if self.settings.cost_to_spend is not None: + cost_percentage = int( + (worker_resource_usage.cost / self.settings.cost_to_spend) * 100 + ) + budget_info_parts.append( + "Cost:" + f" {worker_resource_usage.cost}/" + f"{self.settings.cost_to_spend} ({cost_percentage}%)" + ) + if self.settings.max_evaluation_time_total_seconds is not None: + time_percentage = int( + ( + worker_resource_usage.time + / self.settings.max_evaluation_time_total_seconds + ) + * 100 + ) + budget_info_parts.append( + "Time:" + f" {worker_resource_usage.time}/" + f"{self.settings.max_evaluation_time_total_seconds}s" + f" ({time_percentage}%)" + ) + + if budget_info_parts: + logger.info("Budget status - %s", " | ".join(budget_info_parts)) return_string: str | Literal[False] = False if ( @@ -504,7 +567,8 @@ def _get_next_trial(self) -> Trial | Literal["break"]: if self._requires_global_stopping_criterion: should_stop, stop_criteria = self._check_global_stopping_criterion( - trials + trials, + log_status=True, ) if 
should_stop is not False: logger.info(should_stop) From a5d544d52f46ffec8c80d892b6b3a5cc5f513ea8 Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 8 Dec 2025 05:28:51 +0100 Subject: [PATCH 155/156] fix: Remove fidelity value check and improve Hyperband bracket allocation logic --- neps/optimizers/neps_bracket_optimizer.py | 8 +-- neps/optimizers/utils/brackets.py | 88 ++++++++++++----------- 2 files changed, 49 insertions(+), 47 deletions(-) diff --git a/neps/optimizers/neps_bracket_optimizer.py b/neps/optimizers/neps_bracket_optimizer.py index eb0270d2f..9e7d6dc6b 100644 --- a/neps/optimizers/neps_bracket_optimizer.py +++ b/neps/optimizers/neps_bracket_optimizer.py @@ -241,12 +241,6 @@ def import_trials( for config, result in external_evaluations: fid_value = config[self.fid_name] - if fid_value not in rung_to_fid.values(): - logger.warning( - f"Fidelity value {fid_value} not in known rung fidelities " - f"{list(rung_to_fid.values())}. Skipping config: {config}" - ) - continue # create a unique key for the config without the fidelity config_key = get_trial_config_unique_key( config=config, fid_name=self.fid_name @@ -272,7 +266,7 @@ def import_trials( config_id = config_to_id[config_key] # Find the rung corresponding to the fidelity value in config - rung = next((r for r, f in rung_to_fid.items() if f == fid_value), None) + rung = max((r for r, f in rung_to_fid.items() if f <= fid_value)) trial_id = f"{config_id}_rung_{rung}" imported_configs.append( ImportedConfig( diff --git a/neps/optimizers/utils/brackets.py b/neps/optimizers/utils/brackets.py index 690c5fab2..3ef35c49d 100644 --- a/neps/optimizers/utils/brackets.py +++ b/neps/optimizers/utils/brackets.py @@ -538,53 +538,43 @@ def create_repeating( HyperbandBrackets which have each subselected the table with the corresponding rung sizes. 
""" - all_ids = table.index.get_level_values("id").unique() + # Group configs by their starting rung (minimum rung where they first appear) + # This ensures warmstarts at different fidelities are properly distributed + configs_by_starting_rung: dict[int, list[int]] = {} + for config_id in table.index.get_level_values("id").unique(): + config_rungs = table.loc[config_id].index.get_level_values("rung") + starting_rung = config_rungs.min() + if starting_rung not in configs_by_starting_rung: + configs_by_starting_rung[starting_rung] = [] + configs_by_starting_rung[starting_rung].append(config_id) - # Split the ids into N hyperband brackets of size K. - # K is sum of number of configurations in the lowest rung of each SH bracket - # - # For example: - # > bracket_layouts = [ - # > {0: 81, 1: 27, 2: 9, 3: 3, 4: 1}, - # > {1: 27, 2: 9, 3: 3, 4: 1}, - # > {2: 9, 3: 3, 4: 1}, - # > ... - # > ] - # - # Corresponds to: - # bracket1 - [rung_0: 81, rung_1: 27, rung_2: 9, rung_3: 3, rung_4: 1] - # bracket2 - [rung_1: 27, rung_2: 9, rung_3: 3, rung_4: 1] - # bracket3 - [rung_2: 9, rung_3: 3, rung_4: 1] - # ... - # > K = 81 + 27 + 9 + ... 
- # - bottom_rung_sizes = [sh[min(sh.keys())] for sh in bracket_layouts] - K = sum(bottom_rung_sizes) - N = max(len(all_ids) // K + 1, 1) + empty_slice = table.loc[[]] + hb_brackets: list[list[Sync]] = [] - hb_id_slices: list[Index] = [all_ids[i * K : (i + 1) * K] for i in range(N)] + # Assign configs to brackets based on which rung they start at + # Keep creating Hyperband cycles until all configs are allocated + while any(configs_by_starting_rung.values()): + sh_brackets: list[Sync] = [] - # Used if there is nothing for one of the rungs - empty_slice = table.loc[[]] + for layout in bracket_layouts: + bottom_rung = min(layout.keys()) + capacity_at_bottom = layout[bottom_rung] - # Now for each of our HB brackets, we need to split them into the SH brackets - hb_brackets: list[list[Sync]] = [] + # Take configs that start at this bracket's bottom rung + available_ids = configs_by_starting_rung.get(bottom_rung, []) + bracket_ids = available_ids[:capacity_at_bottom] - offsets = np.cumsum([0, *bottom_rung_sizes]) - for hb_ids in hb_id_slices: - # Split the ids into each of the respective brackets, e.g. [81, 27, 9, ...] 
- ids_for_each_bracket = [hb_ids[s:e] for s, e in pairwise(offsets)] + # Remove assigned configs from pool + if bottom_rung in configs_by_starting_rung: + configs_by_starting_rung[bottom_rung] = available_ids[ + capacity_at_bottom: + ] + if not configs_by_starting_rung[bottom_rung]: + del configs_by_starting_rung[bottom_rung] - # Select the data for each of the configs allocated to these sh_brackets - data_for_each_bracket = [table.loc[_ids] for _ids in ids_for_each_bracket] + # Get data for assigned configs across all their rungs + data_for_bracket = table.loc[bracket_ids] if bracket_ids else empty_slice - # Create the bracket - sh_brackets: list[Sync] = [] - for data_for_bracket, layout in zip( - data_for_each_bracket, - bracket_layouts, - strict=True, - ): rung_data = dict(iter(data_for_bracket.groupby(level="rung", sort=False))) bracket = Sync( rungs=[ @@ -602,6 +592,24 @@ def create_repeating( hb_brackets.append(sh_brackets) + # Always add one empty Hyperband cycle for new samples + sh_brackets = [] + for layout in bracket_layouts: + bracket = Sync( + rungs=[ + Rung( + value=rung, + capacity=capacity, + table=empty_slice, + ) + for rung, capacity in layout.items() + ], + is_multi_objective=is_multi_objective, + mo_selector=mo_selector, + ) + sh_brackets.append(bracket) + hb_brackets.append(sh_brackets) + return [cls(sh_brackets=sh_brackets) for sh_brackets in hb_brackets] def next(self) -> BracketAction: From 6688e61087722cce171a8e6f00a707542127e17e Mon Sep 17 00:00:00 2001 From: Meganton Date: Mon, 8 Dec 2025 05:48:04 +0100 Subject: [PATCH 156/156] fix: Refactor bracket creation logic to group configurations by starting rung for improved sampling --- neps/optimizers/utils/brackets.py | 61 +++++++++++++++---------------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/neps/optimizers/utils/brackets.py b/neps/optimizers/utils/brackets.py index 3ef35c49d..8150f7035 100644 --- a/neps/optimizers/utils/brackets.py +++ 
b/neps/optimizers/utils/brackets.py @@ -10,7 +10,6 @@ if TYPE_CHECKING: import pandas as pd - from pandas import Index logger = logging.getLogger(__name__) @@ -337,41 +336,39 @@ def create_repeating( Brackets which have each subselected the table with the corresponding rung sizes. """ - # Split the trials by their unique_id, taking batches of K at a time, which will - # gives us N = len(unique_is) / K brackets in total. - # - # Here, unique_id referes to the `1` in config_1_0 i.e. id = 1, rung = 0 - # - # 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 ... - # | bracket1 | bracket 2 | ... | - - # K is the number of configurations in the lowest rung, which is how many unique - # ids are needed to fill a single bracket. - K = rung_sizes[min(rung_sizes)] + # Group configs by their starting rung (minimum rung where they first appear). + # This ensures each bracket's bottom rung fills with configs that actually + # evaluate there, which is critical for proper successive halving. + bottom_rung = min(rung_sizes) + bottom_capacity = rung_sizes[bottom_rung] - # N is the number of brackets we need to create to accomodate all the unique ids. - # First we need all of the unique ids. - uniq_ids = table.index.get_level_values("id").unique() + # Group all configs by their starting rung + configs_by_starting_rung: dict[int, list[int]] = {} + for config_id in table.index.get_level_values("id").unique(): + config_rungs = table.loc[config_id].index.get_level_values("rung") + starting_rung = config_rungs.min() + if starting_rung not in configs_by_starting_rung: + configs_by_starting_rung[starting_rung] = [] + configs_by_starting_rung[starting_rung].append(config_id) - # The formula (len(uniq_ids) + K) // K is used instead of - # len(uniq_ids) // K. 
reason: make to ensure that even if the number of - # unique IDs is less than K, at least one bracket is created - N = (len(uniq_ids) + K) // K + # Create brackets by taking configs that start at the bottom rung + brackets_data = [] + while configs_by_starting_rung.get(bottom_rung, []): + # Take up to bottom_capacity configs that start at bottom rung + available_ids = configs_by_starting_rung[bottom_rung] + bracket_ids = available_ids[:bottom_capacity] + configs_by_starting_rung[bottom_rung] = available_ids[bottom_capacity:] - # Now we take the unique ids and split them into batches of size K - bracket_id_slices: list[Index] = [uniq_ids[i * K : (i + 1) * K] for i in range(N)] + if not configs_by_starting_rung[bottom_rung]: + del configs_by_starting_rung[bottom_rung] - # And now select the data for each of the unique_ids in the bracket - bracket_datas = [ - table.loc[bracket_unique_ids] for bracket_unique_ids in bracket_id_slices - ] + # Create data for this bracket + bracket_table = table.loc[bracket_ids] + bracket_data = dict(iter(bracket_table.groupby(level="rung", sort=False))) + brackets_data.append(bracket_data) - # This will give us a list of dictionaries, where each element `n` of the - # list is on of the `N` brackets, and the dictionary at element `n` maps - # from a rung, to the slice of the data for that rung. - all_N_bracket_datas = [ - dict(iter(d.groupby(level="rung", sort=False))) for d in bracket_datas - ] + # Always add one empty bracket for new samples + brackets_data.append({}) # Used if there is nothing for one of the rungs empty_slice = table.loc[[]] @@ -385,7 +382,7 @@ def create_repeating( is_multi_objective=is_multi_objective, mo_selector=mo_selector, ) - for bracket_data in all_N_bracket_datas + for bracket_data in brackets_data ]