diff --git a/.tools/envs/testenv-linux.yml b/.tools/envs/testenv-linux.yml
index f31719d7c..ec4b969f9 100644
--- a/.tools/envs/testenv-linux.yml
+++ b/.tools/envs/testenv-linux.yml
@@ -29,6 +29,7 @@ dependencies:
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
   - pip:  # dev, tests, docs
+      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
diff --git a/.tools/envs/testenv-numpy.yml b/.tools/envs/testenv-numpy.yml
index be4916859..9f9fa7d0f 100644
--- a/.tools/envs/testenv-numpy.yml
+++ b/.tools/envs/testenv-numpy.yml
@@ -27,6 +27,7 @@ dependencies:
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
   - pip:  # dev, tests, docs
+      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
diff --git a/.tools/envs/testenv-others.yml b/.tools/envs/testenv-others.yml
index b6db24adb..ce9490b7f 100644
--- a/.tools/envs/testenv-others.yml
+++ b/.tools/envs/testenv-others.yml
@@ -27,6 +27,7 @@ dependencies:
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
   - pip:  # dev, tests, docs
+      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
diff --git a/.tools/envs/testenv-pandas.yml b/.tools/envs/testenv-pandas.yml
index 3618611c0..7b342240b 100644
--- a/.tools/envs/testenv-pandas.yml
+++ b/.tools/envs/testenv-pandas.yml
@@ -27,6 +27,7 @@ dependencies:
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
   - pip:  # dev, tests, docs
+      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md
index 02b44103c..d9e43a004 100644
--- a/docs/source/algorithms.md
+++ b/docs/source/algorithms.md
@@ -3984,6 +3984,65 @@ iminuit).
 - Values greater than 1 specify the maximum number of restart attempts.
 ```
 
+(nevergrad-algorithms)=
+
+## Nevergrad Optimizers
+
+optimagic supports some algorithms from the
+[Nevergrad](https://facebookresearch.github.io/nevergrad/index.html) library. To use
+these optimizers, you need to have
+[the nevergrad package](https://github.com/facebookresearch/nevergrad) installed
+(`pip install nevergrad`).
+
+```{eval-rst}
+.. dropdown:: nevergrad_pso
+
+    .. code-block::
+
+        "nevergrad_pso"
+
+    Minimize a scalar function using the Particle Swarm Optimization (PSO) algorithm.
+
+    The Particle Swarm Optimization algorithm was originally proposed by
+    :cite:`Kennedy1995`. The implementation in Nevergrad is based on
+    :cite:`Zambrano2013`.
+
+    Particle Swarm Optimization solves a problem by having a population of candidate
+    solutions, here dubbed particles, and moving these particles around in the search
+    space according to simple mathematical formulae over each particle's position and
+    velocity. Each particle's movement is influenced by its own best known position
+    (the "cognitive" component), but it is also guided toward the best known position
+    in the search space (the "social" component), which is updated as better
+    positions are found by other particles. This is expected to move the swarm toward
+    the best solutions.
+
+    - **transform** (str): The transform used to map from the PSO optimization space
+      to R-space. Available options are:
+
+      - "arctan" (default)
+      - "identity"
+      - "gaussian"
+
+    - **population_size** (int): Population size of the particle swarm.
+    - **n_cores** (int): Number of cores to use.
+    - **seed** (int): Seed used by the internal random number generator.
+    - **stopping.maxfun** (int): Maximum number of function evaluations.
+    - **inertia** (float): Inertia weight, denoted by :math:`\omega`. Default is
+      0.7213475204444817. To prevent divergence, the value must be smaller than 1.
+      It controls the influence of the particle's previous velocity on its movement.
+    - **cognitive** (float): Cognitive coefficient, denoted by :math:`\phi_p`.
+      Default is 1.1931471805599454. Typical values range from 1.0 to 3.0. It
+      controls the influence of the particle's own best known position on its
+      movement.
+    - **social** (float): Social coefficient, denoted by :math:`\phi_g`. Default is
+      1.1931471805599454. Typical values range from 1.0 to 3.0. It controls the
+      influence of the swarm's best known position on the particle's movement.
+    - **quasi_opp_init** (bool): Whether to use quasi-opposition initialization.
+      Default is False.
+    - **speed_quasi_opp_init** (bool): Whether to use quasi-opposition initialization
+      for speed. Default is False.
+    - **special_speed_quasi_opp_init** (bool): Whether to use special
+      quasi-opposition initialization for speed. Default is False.
+```
+
 ## References
 
 ```{eval-rst}
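Usage note (not part of the diff): once `nevergrad` is installed, the new algorithm is
selectable through optimagic's regular `minimize` interface. A minimal sketch; the
sphere objective, the bounds, and the option values are illustrative assumptions:

```python
import numpy as np
import optimagic as om


def sphere(x):
    # Illustrative objective with its global minimum at x = 0.
    return x @ x


res = om.minimize(
    fun=sphere,
    params=np.arange(5, dtype=float),
    algorithm="nevergrad_pso",
    # PSO is a global algorithm, so finite bounds should be provided.
    bounds=om.Bounds(lower=np.full(5, -10.0), upper=np.full(5, 10.0)),
    algo_options={"stopping_maxfun": 2_000, "seed": 0},
)
print(res.params)
```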
diff --git a/docs/source/refs.bib b/docs/source/refs.bib
index 45f183b84..6a52fc279 100644
--- a/docs/source/refs.bib
+++ b/docs/source/refs.bib
@@ -906,4 +906,23 @@ @article{JAMES1975343
 author = {F. James and M. Roos}
 }
 
+@InProceedings{Kennedy1995,
+  author    = {Kennedy, J. and Eberhart, R.},
+  booktitle = {Proceedings of ICNN'95 - International Conference on Neural Networks},
+  title     = {Particle swarm optimization},
+  year      = {1995},
+  volume    = {4},
+  pages     = {1942-1948},
+  doi       = {10.1109/ICNN.1995.488968},
+}
+
+@InProceedings{Zambrano2013,
+  author    = {Zambrano-Bigiarini, Mauricio and Clerc, Maurice and Rojas, Rodrigo},
+  booktitle = {2013 IEEE Congress on Evolutionary Computation},
+  title     = {Standard Particle Swarm Optimisation 2011 at CEC-2013: A baseline for future PSO improvements},
+  year      = {2013},
+  pages     = {2337-2344},
+  doi       = {10.1109/CEC.2013.6557848},
+}
+
 @Comment{jabref-meta: databaseType:bibtex;}
diff --git a/environment.yml b/environment.yml
index 681a7e280..80435b8d7 100644
--- a/environment.yml
+++ b/environment.yml
@@ -39,6 +39,7 @@ dependencies:
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
   - pip:  # dev, tests, docs
+      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
diff --git a/pyproject.toml b/pyproject.toml
index bfa2310c2..ce6707e6e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -380,5 +380,6 @@ module = [
     "annotated_types",
     "pdbp",
     "iminuit",
+    "nevergrad",
 ]
 ignore_missing_imports = true
diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py
index 540853192..588514e95 100644
--- a/src/optimagic/algorithms.py
+++ b/src/optimagic/algorithms.py
@@ -18,6 +18,7 @@
 from optimagic.optimizers.ipopt import Ipopt
 from optimagic.optimizers.nag_optimizers import NagDFOLS, NagPyBOBYQA
 from optimagic.optimizers.neldermead import NelderMeadParallel
+from optimagic.optimizers.nevergrad_optimizers import NevergradPSO
 from optimagic.optimizers.nlopt_optimizers import (
     NloptBOBYQA,
     NloptCCSAQ,
@@ -171,6 +172,7 @@ def Scalar(

 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -364,6 +366,7 @@ def Scalar(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithm

 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -403,6 +406,7 @@ def Parallel(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -458,6 +462,7 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorith

 @dataclass(frozen=True)
 class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -605,6 +610,7 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorit

 @dataclass(frozen=True)
 class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -699,6 +705,7 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGlobalParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -1025,6 +1032,7 @@ def Local(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -1088,6 +1096,7 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms:

 @dataclass(frozen=True)
 class GlobalGradientFreeScalarAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -1131,6 +1140,7 @@ def Parallel(self) -> GlobalGradientFreeParallelScalarAlgorithms:

 @dataclass(frozen=True)
 class GlobalGradientFreeParallelAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -1296,6 +1306,7 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class BoundedGradientFreeScalarAlgorithms(AlgoSelection):
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
@@ -1367,6 +1378,7 @@ def Parallel(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms:

 @dataclass(frozen=True)
 class BoundedGradientFreeParallelAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -1448,6 +1460,7 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms:
 @dataclass(frozen=True)
 class GradientFreeParallelScalarAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -1516,6 +1529,7 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGlobalScalarAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -1564,6 +1578,7 @@ def Parallel(self) -> BoundedGlobalParallelScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGlobalParallelAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -1632,6 +1647,7 @@ def Scalar(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms:

 @dataclass(frozen=True)
 class GlobalParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -1866,6 +1882,7 @@ def Scalar(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -2124,6 +2141,7 @@ def Local(self) -> GradientBasedLikelihoodLocalAlgorithms:

 @dataclass(frozen=True)
 class GlobalGradientFreeAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -2211,6 +2229,7 @@ def Scalar(self) -> GradientFreeLocalScalarAlgorithms:
 class BoundedGradientFreeAlgorithms(AlgoSelection):
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
@@ -2307,6 +2326,7 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms:
 class GradientFreeScalarAlgorithms(AlgoSelection):
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
@@ -2387,6 +2407,7 @@ def Parallel(self) -> GradientFreeLeastSquaresParallelAlgorithms:
 @dataclass(frozen=True)
 class GradientFreeParallelAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -2426,6 +2447,7 @@ def Scalar(self) -> GradientFreeParallelScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGlobalAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -2507,6 +2529,7 @@ def Scalar(self) -> GlobalNonlinearConstrainedScalarAlgorithms:

 @dataclass(frozen=True)
 class GlobalScalarAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -2559,6 +2582,7 @@ def Parallel(self) -> GlobalParallelScalarAlgorithms:

 @dataclass(frozen=True)
 class GlobalParallelAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -2823,6 +2847,7 @@ class BoundedScalarAlgorithms(AlgoSelection):
     iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -2923,6 +2948,7 @@ def Parallel(self) -> BoundedLeastSquaresParallelAlgorithms:

 @dataclass(frozen=True)
 class BoundedParallelAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -3024,6 +3050,7 @@ def Scalar(self) -> NonlinearConstrainedParallelScalarAlgorithms:
 @dataclass(frozen=True)
 class ParallelScalarAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -3131,6 +3158,7 @@ class GradientFreeAlgorithms(AlgoSelection):
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
@@ -3201,6 +3229,7 @@ def Scalar(self) -> GradientFreeScalarAlgorithms:

 @dataclass(frozen=True)
 class GlobalAlgorithms(AlgoSelection):
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -3334,6 +3363,7 @@ class BoundedAlgorithms(AlgoSelection):
     ipopt: Type[Ipopt] = Ipopt
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -3470,6 +3500,7 @@ class ScalarAlgorithms(AlgoSelection):
     ipopt: Type[Ipopt] = Ipopt
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -3598,6 +3629,7 @@ def Local(self) -> LikelihoodLocalAlgorithms:
 @dataclass(frozen=True)
 class ParallelAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -3646,6 +3678,7 @@ class Algorithms(AlgoSelection):
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
diff --git a/src/optimagic/config.py b/src/optimagic/config.py
index c41a3f6f1..643a6f663 100644
--- a/src/optimagic/config.py
+++ b/src/optimagic/config.py
@@ -100,6 +100,14 @@
 IS_IMINUIT_INSTALLED = True


+try:
+    import nevergrad  # noqa: F401
+except ImportError:
+    IS_NEVERGRAD_INSTALLED = False
+else:
+    IS_NEVERGRAD_INSTALLED = True
+
+
 # ======================================================================================
 # Check if pandas version is newer or equal to version 2.1.0
 # ======================================================================================
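Note on the `algorithms.py` changes above (not part of the diff): registering
`NevergradPSO` in these `AlgoSelection` dataclasses makes it discoverable through
optimagic's filtered algorithm selection. A sketch; the exact chained property names
are assumed from the class names above:

```python
import optimagic as om

# The new optimizer is reachable from the root selection ...
algo_class = om.algos.nevergrad_pso

# ... and from filtered subsets whose categories it satisfies, e.g. bounded
# global gradient-free scalar algorithms (assumed property chain):
algo_class = om.algos.Bounded.Global.GradientFree.Scalar.nevergrad_pso

# Calling the class yields a configured instance that can be passed to
# om.minimize(..., algorithm=...).
configured = algo_class(stopping_maxfun=500, population_size=20)
```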
diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py
new file mode 100644
index 000000000..5ec0023bf
--- /dev/null
+++ b/src/optimagic/optimizers/nevergrad_optimizers.py
@@ -0,0 +1,110 @@
+"""Implement nevergrad optimizers."""
+
+import math
+from dataclasses import dataclass
+from typing import Literal
+
+import numpy as np
+from numpy.typing import NDArray
+
+from optimagic import mark
+from optimagic.config import IS_NEVERGRAD_INSTALLED
+from optimagic.exceptions import NotInstalledError
+from optimagic.optimization.algo_options import STOPPING_MAXFUN_GLOBAL
+from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
+from optimagic.optimization.internal_optimization_problem import (
+    InternalOptimizationProblem,
+)
+from optimagic.typing import AggregationLevel, PositiveInt
+
+if IS_NEVERGRAD_INSTALLED:
+    import nevergrad as ng
+
+
+@mark.minimizer(
+    name="nevergrad_pso",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradPSO(Algorithm):
+    transform: Literal["arctan", "gaussian", "identity"] = "arctan"
+    population_size: int | None = None
+    n_cores: int = 1
+    seed: int | None = None
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    inertia: float = 0.5 / math.log(2.0)
+    cognitive: float = 0.5 + math.log(2.0)
+    social: float = 0.5 + math.log(2.0)
+    quasi_opp_init: bool = False
+    speed_quasi_opp_init: bool = False
+    special_speed_quasi_opp_init: bool = False
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(
+                "The nevergrad_pso optimizer requires the 'nevergrad' package to be "
+                "installed. You can install it with `pip install nevergrad`. "
+                "Visit https://facebookresearch.github.io/nevergrad/getting_started.html"
+                " for more detailed installation instructions."
+            )
+
+        instrum = ng.p.Instrumentation(
+            ng.p.Array(
+                shape=x0.shape, lower=problem.bounds.lower, upper=problem.bounds.upper
+            )
+        )
+
+        if self.seed is not None:
+            instrum.random_state.seed(self.seed)
+
+        optimizer = ng.optimizers.ConfPSO(
+            transform=self.transform,
+            popsize=self.population_size,
+            omega=self.inertia,
+            phip=self.cognitive,
+            phig=self.social,
+            qo=self.quasi_opp_init,
+            sqo=self.speed_quasi_opp_init,
+            so=self.special_speed_quasi_opp_init,
+        )(
+            parametrization=instrum,
+            budget=self.stopping_maxfun,
+            num_workers=self.n_cores,
+        )
+
+        while optimizer.num_ask < self.stopping_maxfun:
+            x_list = [
+                optimizer.ask()
+                for _ in range(
+                    min(self.n_cores, self.stopping_maxfun - optimizer.num_ask)
+                )
+            ]
+            losses = problem.batch_fun(
+                [x.value[0][0] for x in x_list], n_cores=self.n_cores
+            )
+            for x, loss in zip(x_list, losses, strict=True):
+                optimizer.tell(x, loss)
+
+        recommendation = optimizer.provide_recommendation()
+
+        result = InternalOptimizeResult(
+            x=recommendation.value[0][0],
+            fun=recommendation.loss,
+            success=True,
+            n_fun_evals=optimizer.num_ask,
+            n_jac_evals=0,
+            n_hess_evals=0,
+        )
+
+        return result
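The diff adds `nevergrad` to the test environments but shows no test module. A minimal
smoke test in pytest style might look as follows; the file path, budget, and tolerance
are hypothetical:

```python
# tests/optimagic/optimizers/test_nevergrad_optimizers.py (hypothetical path)
import numpy as np
import pytest

import optimagic as om

# Skip the whole module if nevergrad is not installed.
pytest.importorskip("nevergrad")


def test_nevergrad_pso_solves_bounded_sphere():
    res = om.minimize(
        fun=lambda x: x @ x,
        params=np.array([1.0, 2.0, 3.0]),
        algorithm="nevergrad_pso",
        bounds=om.Bounds(lower=np.full(3, -5.0), upper=np.full(3, 5.0)),
        algo_options={"stopping_maxfun": 3_000, "seed": 0},
    )
    assert np.allclose(res.params, np.zeros(3), atol=1e-2)
```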