diff --git a/.tools/envs/testenv-linux.yml b/.tools/envs/testenv-linux.yml
index 67fab9017..f31719d7c 100644
--- a/.tools/envs/testenv-linux.yml
+++ b/.tools/envs/testenv-linux.yml
@@ -27,6 +27,7 @@ dependencies:
   - pyyaml # dev, tests
   - jinja2 # dev, tests
   - annotated-types # dev, tests
+  - iminuit # dev, tests
   - pip: # dev, tests, docs
     - DFO-LS>=1.5.3 # dev, tests
     - Py-BOBYQA # dev, tests
diff --git a/.tools/envs/testenv-numpy.yml b/.tools/envs/testenv-numpy.yml
index 2cd35c4e0..be4916859 100644
--- a/.tools/envs/testenv-numpy.yml
+++ b/.tools/envs/testenv-numpy.yml
@@ -25,6 +25,7 @@ dependencies:
   - pyyaml # dev, tests
   - jinja2 # dev, tests
   - annotated-types # dev, tests
+  - iminuit # dev, tests
   - pip: # dev, tests, docs
     - DFO-LS>=1.5.3 # dev, tests
     - Py-BOBYQA # dev, tests
diff --git a/.tools/envs/testenv-others.yml b/.tools/envs/testenv-others.yml
index 974cffec1..b6db24adb 100644
--- a/.tools/envs/testenv-others.yml
+++ b/.tools/envs/testenv-others.yml
@@ -25,6 +25,7 @@ dependencies:
   - pyyaml # dev, tests
   - jinja2 # dev, tests
   - annotated-types # dev, tests
+  - iminuit # dev, tests
   - pip: # dev, tests, docs
     - DFO-LS>=1.5.3 # dev, tests
     - Py-BOBYQA # dev, tests
diff --git a/.tools/envs/testenv-pandas.yml b/.tools/envs/testenv-pandas.yml
index 6d88f1016..3618611c0 100644
--- a/.tools/envs/testenv-pandas.yml
+++ b/.tools/envs/testenv-pandas.yml
@@ -25,6 +25,7 @@ dependencies:
   - pyyaml # dev, tests
   - jinja2 # dev, tests
   - annotated-types # dev, tests
+  - iminuit # dev, tests
   - pip: # dev, tests, docs
     - DFO-LS>=1.5.3 # dev, tests
     - Py-BOBYQA # dev, tests
diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md
index b949afa0b..02b44103c 100644
--- a/docs/source/algorithms.md
+++ b/docs/source/algorithms.md
@@ -3936,6 +3936,54 @@ addition to optimagic when using an NLOPT algorithm. To install nlopt run
    10 * (number of parameters + 1).
 ```
 
+## Optimizers from iminuit
+
+optimagic supports the [MIGRAD optimizer](https://iminuit.readthedocs.io/) from the
+[iminuit package](https://github.com/scikit-hep/iminuit). To use MIGRAD, install
+iminuit (`pip install iminuit`).
+
+```{eval-rst}
+.. dropdown:: iminuit_migrad
+
+    .. code-block::
+
+        "iminuit_migrad"
+
+    `MIGRAD <https://iminuit.readthedocs.io/>`_ is the workhorse algorithm of the
+    MINUIT optimization suite, which has been widely used in the high-energy physics
+    community since 1975. The iminuit package is a Python interface to the Minuit2
+    C++ library developed at CERN.
+
+    MIGRAD uses a quasi-Newton (variable-metric) method, iteratively updating an
+    estimate of the Hessian matrix to guide the optimization. The algorithm adapts
+    to challenging landscapes using several key techniques:
+
+    - **Quasi-Newton updates**: The Hessian estimate is updated iteratively rather
+      than recomputed from scratch at each step, improving efficiency.
+    - **Steepest descent fallback**: When the Hessian update fails, MIGRAD falls back
+      to steepest descent with a line search.
+    - **Box constraints handling**: Parameters with bounds are transformed internally
+      to ensure they remain within the allowed limits.
+    - **Heuristics for numerical stability**: Special cases such as flat gradients or
+      singular Hessians are handled by pre-defined heuristics.
+    - **Stopping criterion based on the Estimated Distance to Minimum (EDM)**: The
+      optimization halts when the predicted improvement becomes sufficiently small.
+
+    For details see :cite:`JAMES1975343`.
+
+    **Optimizer Parameters:**
+
+    - **stopping.maxfun** (int): Maximum number of function evaluations.
+      If reached, the optimization stops, but this is not counted as successful
+      convergence. Function evaluations used for numerical gradient calculations
+      do not count toward this limit. Default is 1,000,000.
+
+    - **n_restarts** (int): Number of times to restart the optimizer if convergence
+      is not reached.
+
+      - A value of 1 (the default) means the optimizer runs only once, disabling the
+        restart feature.
+      - Values greater than 1 specify the maximum number of restart attempts.
+```
+
 ## References
 
 ```{eval-rst}
diff --git a/docs/source/refs.bib b/docs/source/refs.bib
index bdae3f4f2..45f183b84 100644
--- a/docs/source/refs.bib
+++ b/docs/source/refs.bib
@@ -893,4 +893,17 @@ @book{Conn2009
   URL = {https://epubs.siam.org/doi/abs/10.1137/1.9780898718768},
 }
 
+@article{JAMES1975343,
+  title = {Minuit -- a system for function minimization and analysis of the parameter errors and correlations},
+  journal = {Computer Physics Communications},
+  volume = {10},
+  number = {6},
+  pages = {343-367},
+  year = {1975},
+  issn = {0010-4655},
+  doi = {10.1016/0010-4655(75)90039-9},
+  url = {https://www.sciencedirect.com/science/article/pii/0010465575900399},
+  author = {F. James and M. Roos},
+}
+
 @Comment{jabref-meta: databaseType:bibtex;}
diff --git a/environment.yml b/environment.yml
index 34ab4604b..681a7e280 100644
--- a/environment.yml
+++ b/environment.yml
@@ -37,6 +37,7 @@ dependencies:
   - jinja2 # dev, tests
   - furo # dev, docs
   - annotated-types # dev, tests
+  - iminuit # dev, tests
   - pip: # dev, tests, docs
     - DFO-LS>=1.5.3 # dev, tests
     - Py-BOBYQA # dev, tests
diff --git a/pyproject.toml b/pyproject.toml
index 40b93ff8d..bfa2310c2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -16,6 +16,7 @@ dependencies = [
     "sqlalchemy>=1.3",
     "annotated-types",
     "typing-extensions",
+    "iminuit",
 ]
 dynamic = ["version"]
 keywords = [
@@ -378,5 +379,6 @@ module = [
     "optimagic._version",
     "annotated_types",
     "pdbp",
+    "iminuit",
 ]
 ignore_missing_imports = true
diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py
index a892f5a51..540853192 100644
--- a/src/optimagic/algorithms.py
+++ b/src/optimagic/algorithms.py
@@ -14,6 +14,7 @@
 from optimagic.optimization.algorithm import Algorithm
 from optimagic.optimizers.bhhh import BHHH
 from optimagic.optimizers.fides import Fides
+from optimagic.optimizers.iminuit_migrad import IminuitMigrad
 from optimagic.optimizers.ipopt import Ipopt
 from optimagic.optimizers.nag_optimizers import NagDFOLS, NagPyBOBYQA
 from optimagic.optimizers.neldermead import NelderMeadParallel
@@ -286,6 +287,7 @@ def Scalar(self) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithm
 @dataclass(frozen=True)
 class BoundedGradientBasedLocalScalarAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
@@ -840,6 +842,7 @@ def NonlinearConstrained(
 @dataclass(frozen=True)
 class BoundedGradientBasedLocalAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
@@ -889,6 +892,7 @@ def Scalar(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class GradientBasedLocalScalarAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB @@ -956,6 +960,7 @@ def Scalar(self) -> BoundedGradientBasedNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class BoundedGradientBasedScalarAlgorithms(AlgoSelection): fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB @@ -1674,6 +1679,7 @@ def Scalar(self) -> BoundedLocalNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class BoundedLocalScalarAlgorithms(AlgoSelection): fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA @@ -1943,6 +1949,7 @@ def Scalar(self) -> GlobalGradientBasedScalarAlgorithms: class GradientBasedLocalAlgorithms(AlgoSelection): bhhh: Type[BHHH] = BHHH fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB @@ -1985,6 +1992,7 @@ def Scalar(self) -> GradientBasedLocalScalarAlgorithms: @dataclass(frozen=True) class BoundedGradientBasedAlgorithms(AlgoSelection): fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB @@ -2054,6 +2062,7 @@ def Scalar(self) -> GradientBasedNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class GradientBasedScalarAlgorithms(AlgoSelection): fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB @@ -2577,6 +2586,7 @@ def Scalar(self) -> GlobalParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedLocalAlgorithms(AlgoSelection): fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA @@ -2659,6 +2669,7 @@ def Scalar(self) -> LocalNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class LocalScalarAlgorithms(AlgoSelection): fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel @@ -2809,6 +2820,7 @@ def Scalar(self) -> BoundedNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class BoundedScalarAlgorithms(AlgoSelection): fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA @@ -3063,6 +3075,7 @@ def Local(self) -> LeastSquaresLocalParallelAlgorithms: class GradientBasedAlgorithms(AlgoSelection): bhhh: Type[BHHH] = BHHH fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB @@ -3246,6 +3259,7 @@ def Scalar(self) -> GlobalScalarAlgorithms: class LocalAlgorithms(AlgoSelection): bhhh: Type[BHHH] = BHHH fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt 
nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA @@ -3316,6 +3330,7 @@ def Scalar(self) -> LocalScalarAlgorithms: @dataclass(frozen=True) class BoundedAlgorithms(AlgoSelection): fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA @@ -3451,6 +3466,7 @@ def Scalar(self) -> NonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class ScalarAlgorithms(AlgoSelection): fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel @@ -3625,6 +3641,7 @@ def Scalar(self) -> ParallelScalarAlgorithms: class Algorithms(AlgoSelection): bhhh: Type[BHHH] = BHHH fides: Type[Fides] = Fides + iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA diff --git a/src/optimagic/config.py b/src/optimagic/config.py index d63ef54ac..c41a3f6f1 100644 --- a/src/optimagic/config.py +++ b/src/optimagic/config.py @@ -92,6 +92,14 @@ IS_NUMBA_INSTALLED = True +try: + import iminuit # noqa: F401 +except ImportError: + IS_IMINUIT_INSTALLED = False +else: + IS_IMINUIT_INSTALLED = True + + # ====================================================================================== # Check if pandas version is newer or equal to version 2.1.0 # ====================================================================================== diff --git a/src/optimagic/optimization/algo_options.py b/src/optimagic/optimization/algo_options.py index 1846ba081..2f7c6fca5 100644 --- a/src/optimagic/optimization/algo_options.py +++ b/src/optimagic/optimization/algo_options.py @@ -122,6 +122,19 @@ """ +N_RESTARTS = 1 +"""int: Number of times to restart the optimizer if convergence is not reached. + This parameter controls how many times the optimization process is restarted + in an attempt to achieve convergence. + + - A value of 1 (the default) indicates that the optimizer will only run once, + disabling the restart feature. + - Values greater than 1 specify the maximum number of restart attempts. + + Note: This is distinct from `STOPPING_MAXITER`, which limits the number of + iterations within a single optimizer run, not the number of restarts. 
+""" + def get_population_size(population_size, x, lower_bound=10): """Default population size for genetic algorithms.""" diff --git a/src/optimagic/optimizers/iminuit_migrad.py b/src/optimagic/optimizers/iminuit_migrad.py new file mode 100644 index 000000000..c58e0f014 --- /dev/null +++ b/src/optimagic/optimizers/iminuit_migrad.py @@ -0,0 +1,130 @@ +from dataclasses import dataclass +from typing import Optional + +import numpy as np +from numpy.typing import NDArray + +from optimagic import mark +from optimagic.config import IS_IMINUIT_INSTALLED +from optimagic.optimization.algo_options import ( + N_RESTARTS, + STOPPING_MAXFUN, +) +from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult +from optimagic.optimization.internal_optimization_problem import ( + InternalOptimizationProblem, +) +from optimagic.typing import AggregationLevel + +if IS_IMINUIT_INSTALLED: + from iminuit import Minuit + + +@mark.minimizer( + name="iminuit_migrad", + solver_type=AggregationLevel.SCALAR, + is_available=IS_IMINUIT_INSTALLED, + is_global=False, + needs_jac=True, + needs_hess=False, + supports_parallelism=False, + supports_bounds=True, + supports_linear_constraints=False, + supports_nonlinear_constraints=False, + disable_history=False, +) +@dataclass(frozen=True) +class IminuitMigrad(Algorithm): + stopping_maxfun: int = STOPPING_MAXFUN + n_restarts: int = N_RESTARTS + + def _solve_internal_problem( + self, problem: InternalOptimizationProblem, params: NDArray[np.float64] + ) -> InternalOptimizeResult: + def wrapped_objective(x: NDArray[np.float64]) -> float: + return float(problem.fun(x)) + + m = Minuit(wrapped_objective, params, grad=problem.jac) + + bounds = _convert_bounds_to_minuit_limits( + problem.bounds.lower, problem.bounds.upper + ) + + for i, (lower, upper) in enumerate(bounds): + if lower is not None or upper is not None: + m.limits[i] = (lower, upper) + + m.migrad( + ncall=self.stopping_maxfun, + iterate=self.n_restarts, + ) + + res = _process_minuit_result(m) + return res + + +def _process_minuit_result(minuit_result: Minuit) -> InternalOptimizeResult: + """Convert iminuit result to Optimagic's internal result format.""" + + res = InternalOptimizeResult( + x=np.array(minuit_result.values), + fun=minuit_result.fval, + success=minuit_result.valid, + message=repr(minuit_result.fmin), + n_fun_evals=minuit_result.nfcn, + n_jac_evals=minuit_result.ngrad, + n_hess_evals=None, + n_iterations=minuit_result.nfcn, + status=None, + jac=None, + hess=None, + hess_inv=np.array(minuit_result.covariance), + max_constraint_violation=None, + info=None, + history=None, + ) + return res + + +def _convert_bounds_to_minuit_limits( + lower_bounds: Optional[NDArray[np.float64]], + upper_bounds: Optional[NDArray[np.float64]], +) -> list[tuple[Optional[float], Optional[float]]]: + """Convert optimization bounds to Minuit-compatible limit format. + + Transforms numpy arrays of bounds into List of tuples as expected by iminuit. + Handles special values like np.inf, -np.inf, and np.nan by converting + them to None where appropriate, as required by Minuit's limits API. + + Parameters + ---------- + lower_bounds : Optional[NDArray[np.float64]] + Array of lower bounds for parameters. + upper_bounds : Optional[NDArray[np.float64]] + Array of upper bounds for parameters. 
+ + Returns + ------- + list[tuple[Optional[float], Optional[float]]] + List of (lower, upper) limit tuples in Minuit format, where: + - None indicates unbounded (equivalent to infinity) + - Float values represent actual bounds + + Notes + ----- + Minuit expects bounds as tuples of (lower, upper) where: + - `None` indicates no bound (equivalent to -inf or +inf) + - A finite float value indicates a specific bound + - Bounds can be asymmetric (e.g., one side bounded, one side not) + + """ + if lower_bounds is None or upper_bounds is None: + return [] + + return [ + ( + None if np.isneginf(lower) or np.isnan(lower) else float(lower), + None if np.isposinf(upper) or np.isnan(upper) else float(upper), + ) + for lower, upper in zip(lower_bounds, upper_bounds, strict=True) + ] diff --git a/tests/optimagic/optimizers/test_iminuit_migrad.py b/tests/optimagic/optimizers/test_iminuit_migrad.py new file mode 100644 index 000000000..48e435ef4 --- /dev/null +++ b/tests/optimagic/optimizers/test_iminuit_migrad.py @@ -0,0 +1,95 @@ +"""Test suite for the iminuit migrad optimizer.""" + +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal as aaae + +from optimagic.config import IS_IMINUIT_INSTALLED +from optimagic.optimization.optimize import minimize +from optimagic.optimizers.iminuit_migrad import ( + IminuitMigrad, + _convert_bounds_to_minuit_limits, +) + + +def sphere(x): + return (x**2).sum() + + +def sphere_grad(x): + return 2 * x + + +def test_convert_bounds_unbounded(): + """Test converting unbounded bounds.""" + lower = np.array([-np.inf, -np.inf]) + upper = np.array([np.inf, np.inf]) + limits = _convert_bounds_to_minuit_limits(lower, upper) + + assert len(limits) == 2 + assert limits[0] == (None, None) + assert limits[1] == (None, None) + + +def test_convert_bounds_lower_only(): + """Test converting lower bounds only.""" + lower = np.array([1.0, 2.0]) + upper = np.array([np.inf, np.inf]) + limits = _convert_bounds_to_minuit_limits(lower, upper) + + assert len(limits) == 2 + assert limits[0] == (1.0, None) + assert limits[1] == (2.0, None) + + +def test_convert_bounds_upper_only(): + """Test converting upper bounds only.""" + lower = np.array([-np.inf, -np.inf]) + upper = np.array([1.0, 2.0]) + limits = _convert_bounds_to_minuit_limits(lower, upper) + + assert len(limits) == 2 + assert limits[0] == (None, 1.0) + assert limits[1] == (None, 2.0) + + +def test_convert_bounds_two_sided(): + """Test converting two-sided bounds.""" + lower = np.array([1.0, -2.0]) + upper = np.array([2.0, -1.0]) + limits = _convert_bounds_to_minuit_limits(lower, upper) + + assert len(limits) == 2 + assert limits[0] == (1.0, 2.0) + assert limits[1] == (-2.0, -1.0) + + +def test_convert_bounds_mixed(): + """Test converting mixed bounds (some infinite, some finite).""" + lower = np.array([-np.inf, 0.0, 1.0]) + upper = np.array([1.0, np.inf, 2.0]) + limits = _convert_bounds_to_minuit_limits(lower, upper) + + assert len(limits) == 3 + assert limits[0] == (None, 1.0) + assert limits[1] == (0.0, None) + assert limits[2] == (1.0, 2.0) + + +@pytest.mark.skipif(not IS_IMINUIT_INSTALLED, reason="iminuit not installed.") +def test_iminuit_migrad(): + """Test basic optimization with sphere function.""" + x0 = np.array([1.0, 2.0, 3.0]) + algorithm = IminuitMigrad() + + res = minimize( + fun=sphere, + jac=sphere_grad, + algorithm=algorithm, + x0=x0, + ) + + assert res.success + aaae(res.x, np.zeros(3), decimal=6) + assert res.n_fun_evals > 0 + assert res.n_jac_evals > 0
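
---

For reviewers: a minimal end-to-end usage sketch of the new optimizer (not part of the patch). It mirrors the test above; the bound values and option values are illustrative, and it assumes `optimagic.Bounds` is the public bounds container. Passing a configured `IminuitMigrad` instance as `algorithm` follows the pattern used in `test_iminuit_migrad`.

```python
import numpy as np

import optimagic as om
from optimagic.optimizers.iminuit_migrad import IminuitMigrad


def sphere(x):
    return (x**2).sum()


def sphere_grad(x):
    return 2 * x


# Bound the first parameter to [0.1, 5.0] and leave the others free.
# n_restarts=2 lets MIGRAD retry once if the first run does not converge.
res = om.minimize(
    fun=sphere,
    jac=sphere_grad,
    x0=np.array([1.0, 2.0, 3.0]),
    algorithm=IminuitMigrad(stopping_maxfun=100_000, n_restarts=2),
    bounds=om.Bounds(
        lower=np.array([0.1, -np.inf, -np.inf]),
        upper=np.array([5.0, np.inf, np.inf]),
    ),
)

# The constrained optimum is x = [0.1, 0.0, 0.0].
print(res.x, res.fun)
```

Using `algorithm="iminuit_migrad"` with `algo_options={"stopping_maxfun": 100_000, "n_restarts": 2}` should select the same configuration.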