diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index dc589c466..0330d22e3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -29,6 +29,7 @@ repos:
       additional_dependencies:
         - hatchling
         - ruff
+        - iminuit
 - repo: https://github.com/pre-commit/pre-commit-hooks
   rev: v5.0.0
   hooks:
diff --git a/.tools/envs/testenv-linux.yml b/.tools/envs/testenv-linux.yml
index ec4b969f9..33c2f5afe 100644
--- a/.tools/envs/testenv-linux.yml
+++ b/.tools/envs/testenv-linux.yml
@@ -33,6 +33,7 @@ dependencies:
   - DFO-LS>=1.5.3 # dev, tests
   - Py-BOBYQA # dev, tests
   - fides==0.7.4 # dev, tests
+  - nevergrad # dev, tests
   - kaleido # dev, tests
   - pandas-stubs # dev, tests
   - types-cffi # dev, tests
diff --git a/.tools/envs/testenv-numpy.yml b/.tools/envs/testenv-numpy.yml
index 9f9fa7d0f..a0b66b7c2 100644
--- a/.tools/envs/testenv-numpy.yml
+++ b/.tools/envs/testenv-numpy.yml
@@ -31,6 +31,7 @@ dependencies:
   - DFO-LS>=1.5.3 # dev, tests
   - Py-BOBYQA # dev, tests
   - fides==0.7.4 # dev, tests
+  - nevergrad # dev, tests
   - kaleido # dev, tests
   - types-cffi # dev, tests
   - types-openpyxl # dev, tests
diff --git a/.tools/envs/testenv-others.yml b/.tools/envs/testenv-others.yml
index ce9490b7f..40b131716 100644
--- a/.tools/envs/testenv-others.yml
+++ b/.tools/envs/testenv-others.yml
@@ -31,6 +31,7 @@ dependencies:
   - DFO-LS>=1.5.3 # dev, tests
   - Py-BOBYQA # dev, tests
   - fides==0.7.4 # dev, tests
+  - nevergrad # dev, tests
   - kaleido # dev, tests
   - pandas-stubs # dev, tests
   - types-cffi # dev, tests
diff --git a/.tools/envs/testenv-pandas.yml b/.tools/envs/testenv-pandas.yml
index 7b342240b..163ccf810 100644
--- a/.tools/envs/testenv-pandas.yml
+++ b/.tools/envs/testenv-pandas.yml
@@ -31,6 +31,7 @@ dependencies:
   - DFO-LS>=1.5.3 # dev, tests
   - Py-BOBYQA # dev, tests
   - fides==0.7.4 # dev, tests
+  - nevergrad # dev, tests
   - kaleido # dev, tests
   - types-cffi # dev, tests
   - types-openpyxl # dev, tests
diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md
index d9e43a004..0d2b12b79 100644
--- a/docs/source/algorithms.md
+++ b/docs/source/algorithms.md
@@ -3984,8 +3984,6 @@ iminuit).
   - Values greater than 1 specify the maximum number of restart attempts.
 ```
 
-(nevergrad-algorithms)=
-
 ## Nevergrad Optimizers
 
 optimagic supports some algorithms from the
@@ -4041,6 +4039,68 @@ these optimizers, you need to have
     for speed. Default is False.
   - **special_speed_quasi_opp_init** (bool): Whether to use special
     quasi-opposition initialization for speed. Default is False.
 ```
+
+```{eval-rst}
+.. dropdown:: nevergrad_cmaes
+
+    .. code-block::
+
+        "nevergrad_cmaes"
+
+    The Covariance Matrix Adaptation Evolution Strategy (CMA-ES) is a stochastic,
+    derivative-free numerical optimization algorithm for difficult (non-convex,
+    ill-conditioned, multi-modal, rugged, noisy) optimization problems in
+    continuous search spaces.
+
+    The version available through nevergrad wraps the external implementation
+    `pycma <https://github.com/CMA-ES/pycma>`_. The original method is described
+    in :cite:`Hansen2023`.
+
+    The fast implementation relies on fcmaes, which can be installed with
+    ``pip install fcmaes``.
+
+    **Optimizer Parameters:**
+
+    - **scale** (float): Scale of the search.
+    - **elitist** (bool): Whether to switch to elitist mode, i.e. ``+`` mode
+      instead of ``,`` mode, where the best point in the population is always
+      retained.
+    - **population_size** (Optional[int]): Population size. Should satisfy
+
+      .. math::
+
+          \text{population\_size} = n \times \text{num\_workers}
+          \quad \text{for some integer } n \geq 1.
+
+      The default is
+
+      .. math::
+
+          \max\left(\text{num\_workers},\
+          4 + \left\lfloor 3 \cdot \log(\text{dimension}) \right\rfloor\right).
+
+    - **popsize_factor** (float): Factor used in the formula for computing the
+      population size. Default is ``3.0``.
+    - **diagonal** (bool): Use the diagonal version of CMA, which is advised for
+      high-dimensional problems.
+    - **high_speed** (bool): Use a metamodel for the recommendation to speed up
+      the optimization. Default is False.
+    - **use_fast_implementation** (bool): Use the fast CMA-ES implementation.
+      Cannot be combined with ``diagonal=True``. Produces equivalent results and
+      is preferable for high dimensions or when objective function evaluations
+      are fast.
+    - **stopping.maxfun** (int): Maximum number of criterion evaluations.
+    - **stopping.maxiter** (int): Maximum number of iterations.
+    - **convergence_ftol_abs** (float): Absolute stopping tolerance on the
+      criterion value.
+    - **convergence_ftol_rel** (float): Relative stopping tolerance on the
+      criterion value.
+    - **convergence_xtol_abs** (float): Absolute stopping tolerance on the
+      parameter vector.
+    - **seed** (int): Seed used by the internal random number generator.
+    - **learning_rate_rank_one_update** (float): Multiplier for the rank-one
+      update learning rate of the covariance matrix. Default is ``1.0``.
+    - **learning_rate_rank_mu_update** (float): Multiplier for the rank-mu
+      update learning rate of the covariance matrix. Default is ``1.0``.
+```
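+
+A minimal usage sketch (the criterion, start values, bounds, and options below
+are purely illustrative):
+
+```python
+import numpy as np
+import optimagic as om
+
+
+def sphere(x):
+    return x @ x
+
+
+res = om.minimize(
+    fun=sphere,
+    params=np.arange(5) - 2.0,
+    algorithm="nevergrad_cmaes",
+    bounds=om.Bounds(lower=np.full(5, -5.0), upper=np.full(5, 5.0)),
+    algo_options={"stopping_maxfun": 2_000, "seed": 0},
+)
+```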
 
 ## References
diff --git a/docs/source/refs.bib b/docs/source/refs.bib
index 6a52fc279..0115cff27 100644
--- a/docs/source/refs.bib
+++ b/docs/source/refs.bib
@@ -906,6 +906,17 @@ @article{JAMES1975343
 author = {F. James and M. Roos}
 }
 
+
+@misc{Hansen2023,
+title={The CMA Evolution Strategy: A Tutorial},
+author={Nikolaus Hansen},
+year={2023},
+eprint={1604.00772},
+archivePrefix={arXiv},
+primaryClass={cs.LG},
+url={https://arxiv.org/abs/1604.00772},
+}
+
 @InProceedings{Kennedy1995,
 author={Kennedy, J. and Eberhart, R.},
 booktitle={Proceedings of ICNN'95 - International Conference on Neural Networks},
diff --git a/environment.yml b/environment.yml
index 80435b8d7..74c598e51 100644
--- a/environment.yml
+++ b/environment.yml
@@ -43,6 +43,7 @@ dependencies:
   - DFO-LS>=1.5.3 # dev, tests
   - Py-BOBYQA # dev, tests
   - fides==0.7.4 # dev, tests
+  - nevergrad # dev, tests
   - kaleido # dev, tests
   - pre-commit>=4 # dev
   - -e . # dev
diff --git a/pyproject.toml b/pyproject.toml
index ce6707e6e..bb82c72a8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,6 +17,7 @@ dependencies = [
     "annotated-types",
     "typing-extensions",
    "iminuit",
+    "nevergrad",
 ]
 dynamic = ["version"]
 keywords = [
@@ -350,6 +351,7 @@ module = [
     "cyipopt",
     "nlopt",
     "bokeh",
+    "nevergrad",
     "bokeh.layouts",
     "bokeh.models",
     "bokeh.plotting",
diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py
index 588514e95..1fb759001 100644
--- a/src/optimagic/algorithms.py
+++ b/src/optimagic/algorithms.py
@@ -18,7 +18,7 @@
 from optimagic.optimizers.ipopt import Ipopt
 from optimagic.optimizers.nag_optimizers import NagDFOLS, NagPyBOBYQA
 from optimagic.optimizers.neldermead import NelderMeadParallel
-from optimagic.optimizers.nevergrad_optimizers import NevergradPSO
+from optimagic.optimizers.nevergrad_optimizers import NevergradCMAES, NevergradPSO
 from optimagic.optimizers.nlopt_optimizers import (
     NloptBOBYQA,
     NloptCCSAQ,
@@ -172,6 +172,7 @@ def Scalar(
 
 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -366,6 +367,7 @@ def Scalar(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithm
 
 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
@@ -406,6 +408,7 @@ def Parallel(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -462,6 +465,7 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorith
 
 @dataclass(frozen=True)
 class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -610,6 +614,7 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorit
 
 @dataclass(frozen=True)
 class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -705,6 +710,7 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -1032,6 +1038,7 @@ def Local(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
@@ -1096,6 +1103,7 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalGradientFreeScalarAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
@@ -1140,6 +1148,7 @@ def Parallel(self) -> GlobalGradientFreeParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalGradientFreeParallelAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -1306,6 +1315,7 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class BoundedGradientFreeScalarAlgorithms(AlgoSelection):
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -1378,6 +1388,7 @@ def Parallel(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGradientFreeParallelAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
@@ -1460,6 +1471,7 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms:
 @dataclass(frozen=True)
 class GradientFreeParallelScalarAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -1529,6 +1541,7 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalScalarAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
@@ -1578,6 +1591,7 @@ def Parallel(self) -> BoundedGlobalParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalParallelAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -1647,6 +1661,7 @@ def Scalar(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -1882,6 +1897,7 @@ def Scalar(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -2141,6 +2157,7 @@ def Local(self) -> GradientBasedLikelihoodLocalAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalGradientFreeAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
@@ -2229,6 +2246,7 @@ def Scalar(self) -> GradientFreeLocalScalarAlgorithms:
 class BoundedGradientFreeAlgorithms(AlgoSelection):
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -2326,6 +2344,7 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms:
 class GradientFreeScalarAlgorithms(AlgoSelection):
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -2407,6 +2426,7 @@ def Parallel(self) -> GradientFreeLeastSquaresParallelAlgorithms:
 @dataclass(frozen=True)
 class GradientFreeParallelAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
@@ -2447,6 +2467,7 @@ def Scalar(self) -> GradientFreeParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
@@ -2529,6 +2550,7 @@ def Scalar(self) -> GlobalNonlinearConstrainedScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalScalarAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
@@ -2582,6 +2604,7 @@ def Parallel(self) -> GlobalParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalParallelAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -2847,6 +2870,7 @@ class BoundedScalarAlgorithms(AlgoSelection):
     iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
@@ -2948,6 +2972,7 @@ def Parallel(self) -> BoundedLeastSquaresParallelAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedParallelAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
@@ -3050,6 +3075,7 @@ def Scalar(self) -> NonlinearConstrainedParallelScalarAlgorithms:
 @dataclass(frozen=True)
 class ParallelScalarAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -3158,6 +3184,7 @@ class GradientFreeAlgorithms(AlgoSelection):
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -3229,6 +3256,7 @@ def Scalar(self) -> GradientFreeScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalAlgorithms(AlgoSelection):
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
@@ -3363,6 +3391,7 @@ class BoundedAlgorithms(AlgoSelection):
     ipopt: Type[Ipopt] = Ipopt
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
@@ -3500,6 +3529,7 @@ class ScalarAlgorithms(AlgoSelection):
     ipopt: Type[Ipopt] = Ipopt
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
@@ -3629,6 +3659,7 @@ def Local(self) -> LikelihoodLocalAlgorithms:
 @dataclass(frozen=True)
 class ParallelAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
@@ -3678,6 +3709,7 @@ class Algorithms(AlgoSelection):
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
diff --git a/src/optimagic/config.py b/src/optimagic/config.py
index 643a6f663..f2f69b088 100644
--- a/src/optimagic/config.py
+++ b/src/optimagic/config.py
@@ -54,6 +54,13 @@
 else:
     IS_PYGMO_INSTALLED = True
 
+try:
+    import nevergrad as ng  # noqa: F401
+except ImportError:
+    IS_NEVERGRAD_INSTALLED = False
+else:
+    IS_NEVERGRAD_INSTALLED = True
+
 try:
     import cyipopt  # noqa: F401
 except ImportError:
diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py
index 10003a0b7..cfae2cd10 100644
--- a/src/optimagic/optimizers/nevergrad_optimizers.py
+++ b/src/optimagic/optimizers/nevergrad_optimizers.py
@@ -2,7 +2,7 @@
 import math
 from dataclasses import dataclass
-from typing import Literal
+from typing import Any, Literal
 
 import numpy as np
 from numpy.typing import NDArray
@@ -10,16 +10,28 @@
 from optimagic import mark
 from optimagic.config import IS_NEVERGRAD_INSTALLED
 from optimagic.exceptions import NotInstalledError
-from optimagic.optimization.algo_options import STOPPING_MAXFUN_GLOBAL
+from optimagic.optimization.algo_options import STOPPING_MAXFUN_GLOBAL, STOPPING_MAXITER
 from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
 from optimagic.optimization.internal_optimization_problem import (
     InternalOptimizationProblem,
 )
-from optimagic.typing import AggregationLevel, PositiveInt
+from optimagic.typing import (
+    AggregationLevel,
+    Callable,
+    NonNegativeFloat,
+    PositiveInt,
+)
 
 if IS_NEVERGRAD_INSTALLED:
     import nevergrad as ng
 
+NEVERGRAD_NOT_INSTALLED_ERROR = (
+    "This optimizer requires the 'nevergrad' package to be installed. "
+    "You can install it with `pip install nevergrad`. "
+    "Visit https://facebookresearch.github.io/nevergrad/getting_started.html "
+    "for more detailed installation instructions."
+)
+
 
 @mark.minimizer(
     name="nevergrad_pso",
@@ -40,7 +52,7 @@ class NevergradPSO(Algorithm):
     population_size: int | None = None
     n_cores: int = 1
     seed: int | None = None
-    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL * 2  # imprecise algorithm
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
     inertia: float = 0.5 / math.log(2.0)
     cognitive: float = 0.5 + math.log(2.0)
     social: float = 0.5 + math.log(2.0)
@@ -52,23 +64,9 @@ def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
     ) -> InternalOptimizeResult:
         if not IS_NEVERGRAD_INSTALLED:
-            raise NotInstalledError(
-                "The nevergrad_pso optimizer requires the 'nevergrad' package to be "
-                "installed. You can install it with `pip install nevergrad`. "
-                "Visit https://facebookresearch.github.io/nevergrad/getting_started.html"
-                " for more detailed installation instructions."
-            )
-
-        instrum = ng.p.Instrumentation(
-            ng.p.Array(
-                shape=x0.shape, lower=problem.bounds.lower, upper=problem.bounds.upper
-            )
-        )
-
-        if self.seed is not None:
-            instrum.random_state.seed(self.seed)
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
 
-        optimizer = ng.optimizers.ConfPSO(
+        raw_optimizer = ng.optimizers.ConfPSO(
             transform=self.transform,
             popsize=self.population_size,
             omega=self.inertia,
@@ -77,34 +75,134 @@ def _solve_internal_problem(
             qo=self.quasi_opp_init,
             sqo=self.speed_quasi_opp_init,
             so=self.special_speed_quasi_opp_init,
-        )(
-            parametrization=instrum,
-            budget=self.stopping_maxfun,
-            num_workers=self.n_cores,
         )
 
-        while optimizer.num_ask < self.stopping_maxfun:
-            x_list = [
-                optimizer.ask()
-                for _ in range(
-                    min(self.n_cores, self.stopping_maxfun - optimizer.num_ask)
-                )
-            ]
-            losses = problem.batch_fun(
-                [x.value[0][0] for x in x_list], n_cores=self.n_cores
-            )
-            for x, loss in zip(x_list, losses, strict=True):
-                optimizer.tell(x, loss)
-
-        recommendation = optimizer.provide_recommendation()
-
-        result = InternalOptimizeResult(
-            x=recommendation.value[0][0],
-            fun=recommendation.loss,
-            success=True,
-            n_fun_evals=optimizer.num_ask,
-            n_jac_evals=0,
-            n_hess_evals=0,
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            raw_optimizer=raw_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
         )
 
-        return result
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_cmaes",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradCMAES(Algorithm):
+    scale: NonNegativeFloat = 1.0
+    seed: int | None = None
+    population_size: int | None = None
+    elitist: bool = False
+    diagonal: bool = False
+    use_fast_implementation: bool = False
+    high_speed: bool = False
+    n_cores: PositiveInt = 1
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    stopping_maxiter: PositiveInt = STOPPING_MAXITER
+    learning_rate_rank_one_update: NonNegativeFloat = 1.0
+    learning_rate_rank_mu_update: NonNegativeFloat = 1.0
+    convergence_ftol_abs: NonNegativeFloat = 1e-11
+    convergence_xtol_abs: NonNegativeFloat = 1e-11
+    convergence_ftol_rel: NonNegativeFloat = 0.0
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
+        # Options that are passed through to the wrapped pycma implementation.
+        cma_options = {
+            "tolx": self.convergence_xtol_abs,
+            "tolfun": self.convergence_ftol_abs,
+            "tolfunrel": self.convergence_ftol_rel,
+            "maxiter": self.stopping_maxiter,
+            "CMA_rankmu": self.learning_rate_rank_mu_update,
+            "CMA_rankone": self.learning_rate_rank_one_update,
+        }
+
+        raw_optimizer = ng.optimizers.ParametrizedCMA(
+            scale=self.scale,
+            popsize=self.population_size,
+            elitist=self.elitist,
+            diagonal=self.diagonal,
+            high_speed=self.high_speed,
+            fcmaes=self.use_fast_implementation,
+            inopts=cma_options,
+        )
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            raw_optimizer=raw_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+        )
+
+        return res
+
+
+def _nevergrad_internal(
+    problem: InternalOptimizationProblem,
+    x0: NDArray[np.float64],
+    n_cores: int,
+    raw_optimizer: Callable[..., Any],
+    stopping_maxfun: int,
+    seed: int | None,
+) -> InternalOptimizeResult:
+    """Run the optimization loop shared by all nevergrad optimizers."""
+    # Clip x0 into the bounds so that nevergrad accepts it as initial value.
+    param = ng.p.Array(
+        init=np.clip(x0, problem.bounds.lower, problem.bounds.upper)
+    ).set_bounds(lower=problem.bounds.lower, upper=problem.bounds.upper)
+
+    instrum = ng.p.Instrumentation(param)
+
+    if seed is not None:
+        instrum.random_state.seed(seed)
+
+    optimizer = raw_optimizer(
+        parametrization=instrum, budget=stopping_maxfun, num_workers=n_cores
+    )
+
+    # Ask-and-tell loop: draw up to n_cores candidates, evaluate them in
+    # parallel, and feed the losses back to the optimizer.
+    while optimizer.num_ask < stopping_maxfun:
+        x_list = [
+            optimizer.ask()
+            for _ in range(min(n_cores, stopping_maxfun - optimizer.num_ask))
+        ]
+        losses = problem.batch_fun([x.value[0][0] for x in x_list], n_cores=n_cores)
+        for x, loss in zip(x_list, losses, strict=True):
+            optimizer.tell(x, loss)
+
+    recommendation = optimizer.provide_recommendation()
+    best_x = recommendation.value[0][0]
+    loss = recommendation.loss
+    # Some optimizers do not attach a loss to the recommendation, so
+    # re-evaluate the criterion at the recommended point in that case.
+    if loss is None:
+        loss = problem.fun(best_x)
+
+    result = InternalOptimizeResult(
+        x=best_x,
+        fun=loss,
+        success=True,
+        n_fun_evals=optimizer.num_ask,
+        n_jac_evals=0,
+        n_hess_evals=0,
+    )
+
+    return result
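For reference, a self-contained sketch of the ask-and-tell pattern that `_nevergrad_internal` wraps, run directly against nevergrad (the toy criterion, budget, and batch size are illustrative):

```python
import nevergrad as ng
import numpy as np


def sphere(x: np.ndarray) -> float:
    # Toy criterion with its minimum at the origin.
    return float(x @ x)


# Bounded array parameter, analogous to the instrumentation built in the helper.
param = ng.p.Array(init=np.zeros(3)).set_bounds(lower=-5.0, upper=5.0)
optimizer = ng.optimizers.CMA(parametrization=param, budget=200, num_workers=2)

# Draw candidates in small batches, evaluate them, and report the losses back.
while optimizer.num_ask < 200:
    candidates = [optimizer.ask() for _ in range(2)]
    for cand in candidates:
        optimizer.tell(cand, sphere(cand.value))

recommendation = optimizer.provide_recommendation()
print(recommendation.value, recommendation.loss)
```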